From 3adaf096758a6015ca4f733e2e49ee5528ac3cd5 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 24 Mar 2016 11:54:05 -0700 Subject: [PATCH 01/39] Settings: Cleanup placeholder replacement This change moves placeholder replacement to a pkg private class for settings. It also adds a null check when calling replacement, as settings objects can still contain null values, because we only prohibit nulls on file loading. Finally, this cleans up file and stream loading a bit to not have unnecessary exception wrapping. --- .../common/logging/LogConfigurator.java | 2 +- .../PropertyPlaceholder.java | 37 ++++------ .../common/settings/Settings.java | 35 +++++---- .../indices/analysis/HunspellService.java | 2 +- .../internal/InternalSettingsPreparer.java | 6 +- .../PropertyPlaceholderTests.java | 62 ++++++---------- .../loader/YamlSettingsLoaderTests.java | 60 ++++++++-------- .../index/analysis/AnalysisModuleTests.java | 2 +- .../index/analysis/CompoundAnalysisTests.java | 4 +- .../cloud/azure/AbstractAzureTestCase.java | 72 ------------------- .../cloud/aws/AbstractAwsTestCase.java | 7 +- .../AbstractAzureWithThirdPartyTestCase.java | 7 +- .../cloud/aws/AbstractAwsTestCase.java | 7 +- 13 files changed, 110 insertions(+), 193 deletions(-) rename core/src/main/java/org/elasticsearch/common/{property => settings}/PropertyPlaceholder.java (83%) rename core/src/test/java/org/elasticsearch/common/{property => settings}/PropertyPlaceholderTests.java (78%) delete mode 100644 plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTestCase.java diff --git a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index da628b09d2b..5ccbfe4a2dd 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -167,7 +167,7 @@ public class LogConfigurator { static void 
loadConfig(Path file, Settings.Builder settingsBuilder) { try { settingsBuilder.loadFromPath(file); - } catch (SettingsException | NoClassDefFoundError e) { + } catch (IOException | SettingsException | NoClassDefFoundError e) { // ignore } } diff --git a/core/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java b/core/src/main/java/org/elasticsearch/common/settings/PropertyPlaceholder.java similarity index 83% rename from core/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java rename to core/src/main/java/org/elasticsearch/common/settings/PropertyPlaceholder.java index 70e6807cb92..2eb7e2b8e70 100644 --- a/core/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java +++ b/core/src/main/java/org/elasticsearch/common/settings/PropertyPlaceholder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.common.property; +package org.elasticsearch.common.settings; import org.elasticsearch.common.Strings; @@ -34,23 +34,12 @@ import java.util.Set; * Values for substitution can be supplied using a {@link Properties} instance or using a * {@link PlaceholderResolver}. */ -public class PropertyPlaceholder { +class PropertyPlaceholder { private final String placeholderPrefix; private final String placeholderSuffix; private final boolean ignoreUnresolvablePlaceholders; - /** - * Creates a new PropertyPlaceholderHelper that uses the supplied prefix and suffix. Unresolvable - * placeholders are ignored. - * - * @param placeholderPrefix the prefix that denotes the start of a placeholder. - * @param placeholderSuffix the suffix that denotes the end of a placeholder. - */ - public PropertyPlaceholder(String placeholderPrefix, String placeholderSuffix) { - this(placeholderPrefix, placeholderSuffix, true); - } - /** * Creates a new PropertyPlaceholderHelper that uses the supplied prefix and suffix. 
* @@ -59,12 +48,10 @@ public class PropertyPlaceholder { * @param ignoreUnresolvablePlaceholders indicates whether unresolvable placeholders should be ignored * (true) or cause an exception (false). */ - public PropertyPlaceholder(String placeholderPrefix, String placeholderSuffix, + PropertyPlaceholder(String placeholderPrefix, String placeholderSuffix, boolean ignoreUnresolvablePlaceholders) { - Objects.requireNonNull(placeholderPrefix, "Argument 'placeholderPrefix' must not be null."); - Objects.requireNonNull(placeholderSuffix, "Argument 'placeholderSuffix' must not be null."); - this.placeholderPrefix = placeholderPrefix; - this.placeholderSuffix = placeholderSuffix; + this.placeholderPrefix = Objects.requireNonNull(placeholderPrefix); + this.placeholderSuffix = Objects.requireNonNull(placeholderSuffix); this.ignoreUnresolvablePlaceholders = ignoreUnresolvablePlaceholders; } @@ -75,15 +62,15 @@ public class PropertyPlaceholder { * @param value the value containing the placeholders to be replaced. * @param placeholderResolver the PlaceholderResolver to use for replacement. * @return the supplied value with placeholders replaced inline. 
+ * @throws NullPointerException if value is null */ - public String replacePlaceholders(String key, String value, PlaceholderResolver placeholderResolver) { - Objects.requireNonNull(key); - Objects.requireNonNull(value, "value can not be null for [" + key + "]"); - return parseStringValue(value, placeholderResolver, new HashSet()); + String replacePlaceholders(String value, PlaceholderResolver placeholderResolver) { + Objects.requireNonNull(value); + return parseStringValue(value, placeholderResolver, new HashSet<>()); } - protected String parseStringValue(String strVal, PlaceholderResolver placeholderResolver, - Set visitedPlaceholders) { + private String parseStringValue(String strVal, PlaceholderResolver placeholderResolver, + Set visitedPlaceholders) { StringBuilder buf = new StringBuilder(strVal); int startIndex = strVal.indexOf(this.placeholderPrefix); @@ -164,7 +151,7 @@ public class PropertyPlaceholder { * * @see PropertyPlaceholder */ - public interface PlaceholderResolver { + interface PlaceholderResolver { /** * Resolves the supplied placeholder name into the replacement value. 
diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index ce79bf92d20..887edf48443 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.property.PropertyPlaceholder; import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.settings.loader.SettingsLoaderFactory; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -1114,26 +1113,20 @@ public final class Settings implements ToXContent { * Loads settings from a url that represents them using the * {@link SettingsLoaderFactory#loaderFromSource(String)}. */ - public Builder loadFromPath(Path path) throws SettingsException { - try { - return loadFromStream(path.getFileName().toString(), Files.newInputStream(path)); - } catch (IOException e) { - throw new SettingsException("Failed to open stream for url [" + path + "]", e); - } + public Builder loadFromPath(Path path) throws IOException { + // NOTE: loadFromStream will close the input stream + return loadFromStream(path.getFileName().toString(), Files.newInputStream(path)); } /** * Loads settings from a stream that represents them using the * {@link SettingsLoaderFactory#loaderFromSource(String)}. 
*/ - public Builder loadFromStream(String resourceName, InputStream is) throws SettingsException { + public Builder loadFromStream(String resourceName, InputStream is) throws IOException { SettingsLoader settingsLoader = SettingsLoaderFactory.loaderFromResource(resourceName); - try { - Map loadedSettings = settingsLoader.load(Streams.copyToString(new InputStreamReader(is, StandardCharsets.UTF_8))); - put(loadedSettings); - } catch (Exception e) { - throw new SettingsException("Failed to load settings from [" + resourceName + "]", e); - } + // NOTE: copyToString will close the input stream + Map loadedSettings = settingsLoader.load(Streams.copyToString(new InputStreamReader(is, StandardCharsets.UTF_8))); + put(loadedSettings); return this; } @@ -1220,14 +1213,20 @@ public final class Settings implements ToXContent { return true; } }; - for (Map.Entry entry : new HashMap<>(map).entrySet()) { - String value = propertyPlaceholder.replacePlaceholders(entry.getKey(), entry.getValue(), placeholderResolver); + Iterator> entryItr = map.entrySet().iterator(); + while (entryItr.hasNext()) { + Map.Entry entry = entryItr.next(); + if (entry.getValue() == null) { + // a null value obviously can't be replaced + continue; + } + String value = propertyPlaceholder.replacePlaceholders(entry.getValue(), placeholderResolver); // if the values exists and has length, we should maintain it in the map // otherwise, the replace process resolved into removing it if (Strings.hasLength(value)) { - map.put(entry.getKey(), value); + entry.setValue(value); } else { - map.remove(entry.getKey()); + entryItr.remove(); } } return this; diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java index 75c15f09778..4450bd557b6 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java @@ 
-210,7 +210,7 @@ public class HunspellService extends AbstractComponent { * @param defaults The default settings for this dictionary * @return The resolved settings. */ - private static Settings loadDictionarySettings(Path dir, Settings defaults) { + private static Settings loadDictionarySettings(Path dir, Settings defaults) throws IOException { Path file = dir.resolve("settings.yml"); if (Files.exists(file)) { return Settings.settingsBuilder().loadFromPath(file).put(defaults).build(); diff --git a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java index 8864a70ccdc..f9539f7c363 100644 --- a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java +++ b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java @@ -92,7 +92,11 @@ public class InternalSettingsPreparer { Path path = environment.configFile().resolve("elasticsearch" + allowedSuffix); if (Files.exists(path)) { if (!settingsFileFound) { - output.loadFromPath(path); + try { + output.loadFromPath(path); + } catch (IOException e) { + throw new SettingsException("Failed to load settings from " + path.toString(), e); + } } settingsFileFound = true; foundSuffixes.add(allowedSuffix); diff --git a/core/src/test/java/org/elasticsearch/common/property/PropertyPlaceholderTests.java b/core/src/test/java/org/elasticsearch/common/settings/PropertyPlaceholderTests.java similarity index 78% rename from core/src/test/java/org/elasticsearch/common/property/PropertyPlaceholderTests.java rename to core/src/test/java/org/elasticsearch/common/settings/PropertyPlaceholderTests.java index 405ac566686..78176bc1d80 100644 --- a/core/src/test/java/org/elasticsearch/common/property/PropertyPlaceholderTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/PropertyPlaceholderTests.java @@ -17,14 +17,13 @@ * under the License.
*/ -package org.elasticsearch.common.property; - -import org.elasticsearch.test.ESTestCase; +package org.elasticsearch.common.settings; import java.util.LinkedHashMap; import java.util.Map; -import static org.hamcrest.Matchers.hasToString; +import org.elasticsearch.test.ESTestCase; + import static org.hamcrest.Matchers.is; public class PropertyPlaceholderTests extends ESTestCase { @@ -34,10 +33,10 @@ public class PropertyPlaceholderTests extends ESTestCase { map.put("foo1", "bar1"); map.put("foo2", "bar2"); PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false, true); - assertEquals("bar1", propertyPlaceholder.replacePlaceholders("key", "{foo1}", placeholderResolver)); - assertEquals("a bar1b", propertyPlaceholder.replacePlaceholders("key", "a {foo1}b", placeholderResolver)); - assertEquals("bar1bar2", propertyPlaceholder.replacePlaceholders("key", "{foo1}{foo2}", placeholderResolver)); - assertEquals("a bar1 b bar2 c", propertyPlaceholder.replacePlaceholders("key", "a {foo1} b {foo2} c", placeholderResolver)); + assertEquals("bar1", propertyPlaceholder.replacePlaceholders("{foo1}", placeholderResolver)); + assertEquals("a bar1b", propertyPlaceholder.replacePlaceholders("a {foo1}b", placeholderResolver)); + assertEquals("bar1bar2", propertyPlaceholder.replacePlaceholders("{foo1}{foo2}", placeholderResolver)); + assertEquals("a bar1 b bar2 c", propertyPlaceholder.replacePlaceholders("a {foo1} b {foo2} c", placeholderResolver)); } public void testVariousPrefixSuffix() { @@ -48,24 +47,24 @@ public class PropertyPlaceholderTests extends ESTestCase { Map map = new LinkedHashMap<>(); map.put("foo", "bar"); PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false, true); - assertEquals("bar", ppEqualsPrefix.replacePlaceholders("key", "{foo}", placeholderResolver)); - assertEquals("bar", ppLongerPrefix.replacePlaceholders("key", "${foo}", placeholderResolver)); - 
assertEquals("bar", ppShorterPrefix.replacePlaceholders("key", "{foo}}", placeholderResolver)); + assertEquals("bar", ppEqualsPrefix.replacePlaceholders("{foo}", placeholderResolver)); + assertEquals("bar", ppLongerPrefix.replacePlaceholders("${foo}", placeholderResolver)); + assertEquals("bar", ppShorterPrefix.replacePlaceholders("{foo}}", placeholderResolver)); } public void testDefaultValue() { PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false); Map map = new LinkedHashMap<>(); PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false, true); - assertEquals("bar", propertyPlaceholder.replacePlaceholders("key", "${foo:bar}", placeholderResolver)); - assertEquals("", propertyPlaceholder.replacePlaceholders("key", "${foo:}", placeholderResolver)); + assertEquals("bar", propertyPlaceholder.replacePlaceholders("${foo:bar}", placeholderResolver)); + assertEquals("", propertyPlaceholder.replacePlaceholders("${foo:}", placeholderResolver)); } public void testIgnoredUnresolvedPlaceholder() { PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", true); Map map = new LinkedHashMap<>(); PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false, true); - assertEquals("${foo}", propertyPlaceholder.replacePlaceholders("key", "${foo}", placeholderResolver)); + assertEquals("${foo}", propertyPlaceholder.replacePlaceholders("${foo}", placeholderResolver)); } public void testNotIgnoredUnresolvedPlaceholder() { @@ -73,7 +72,7 @@ public class PropertyPlaceholderTests extends ESTestCase { Map map = new LinkedHashMap<>(); PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false, true); try { - propertyPlaceholder.replacePlaceholders("key", "${foo}", placeholderResolver); + propertyPlaceholder.replacePlaceholders("${foo}", placeholderResolver); fail("Expected IllegalArgumentException"); } catch 
(IllegalArgumentException e) { assertThat(e.getMessage(), is("Could not resolve placeholder 'foo'")); @@ -84,7 +83,7 @@ public class PropertyPlaceholderTests extends ESTestCase { PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false); Map map = new LinkedHashMap<>(); PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, true, true); - assertEquals("bar", propertyPlaceholder.replacePlaceholders("key", "bar${foo}", placeholderResolver)); + assertEquals("bar", propertyPlaceholder.replacePlaceholders("bar${foo}", placeholderResolver)); } public void testRecursive() { @@ -94,8 +93,8 @@ public class PropertyPlaceholderTests extends ESTestCase { map.put("foo1", "${foo2}"); map.put("foo2", "bar"); PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false, true); - assertEquals("bar", propertyPlaceholder.replacePlaceholders("key", "${foo}", placeholderResolver)); - assertEquals("abarb", propertyPlaceholder.replacePlaceholders("key", "a${foo}b", placeholderResolver)); + assertEquals("bar", propertyPlaceholder.replacePlaceholders("${foo}", placeholderResolver)); + assertEquals("abarb", propertyPlaceholder.replacePlaceholders("a${foo}b", placeholderResolver)); } public void testNestedLongerPrefix() { @@ -106,7 +105,7 @@ public class PropertyPlaceholderTests extends ESTestCase { map.put("foo2", "bar"); map.put("barbar", "baz"); PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false, true); - assertEquals("baz", propertyPlaceholder.replacePlaceholders("key", "${bar${foo}}", placeholderResolver)); + assertEquals("baz", propertyPlaceholder.replacePlaceholders("${bar${foo}}", placeholderResolver)); } public void testNestedSameLengthPrefixSuffix() { @@ -117,7 +116,7 @@ public class PropertyPlaceholderTests extends ESTestCase { map.put("foo2", "bar"); map.put("barbar", "baz"); PropertyPlaceholder.PlaceholderResolver 
placeholderResolver = new SimplePlaceholderResolver(map, false, true); - assertEquals("baz", propertyPlaceholder.replacePlaceholders("key", "{bar{foo}}", placeholderResolver)); + assertEquals("baz", propertyPlaceholder.replacePlaceholders("{bar{foo}}", placeholderResolver)); } public void testNestedShorterPrefix() { @@ -128,7 +127,7 @@ public class PropertyPlaceholderTests extends ESTestCase { map.put("foo2", "bar"); map.put("barbar", "baz"); PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false, true); - assertEquals("baz", propertyPlaceholder.replacePlaceholders("key", "{bar{foo}}}}", placeholderResolver)); + assertEquals("baz", propertyPlaceholder.replacePlaceholders("{bar{foo}}}}", placeholderResolver)); } public void testCircularReference() { @@ -138,7 +137,7 @@ public class PropertyPlaceholderTests extends ESTestCase { map.put("bar", "${foo}"); PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false, true); try { - propertyPlaceholder.replacePlaceholders("key", "${foo}", placeholderResolver); + propertyPlaceholder.replacePlaceholders("${foo}", placeholderResolver); fail("Expected IllegalArgumentException"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), is("Circular placeholder reference 'foo' in property definitions")); @@ -149,24 +148,7 @@ public class PropertyPlaceholderTests extends ESTestCase { PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false); Map map = new LinkedHashMap<>(); PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, true, false); - assertEquals("bar${foo}", propertyPlaceholder.replacePlaceholders("key", "bar${foo}", placeholderResolver)); - } - - public void testNullKey() { - final PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false); - final Map map = new LinkedHashMap<>(); - final PropertyPlaceholder.PlaceholderResolver 
placeholderResolver = new SimplePlaceholderResolver(map, true, false); - expectThrows(NullPointerException.class, () -> propertyPlaceholder.replacePlaceholders(null, "value", placeholderResolver)); - } - - public void testNullValue() { - final PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false); - final Map map = new LinkedHashMap<>(); - final PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, true, false); - final String key = randomAsciiOfLength(10); - NullPointerException e = - expectThrows(NullPointerException.class, () -> propertyPlaceholder.replacePlaceholders(key, null, placeholderResolver)); - assertThat(e, hasToString("java.lang.NullPointerException: value can not be null for [" + key + "]")); + assertEquals("bar${foo}", propertyPlaceholder.replacePlaceholders("bar${foo}", placeholderResolver)); } private class SimplePlaceholderResolver implements PropertyPlaceholder.PlaceholderResolver { diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java index 2e2a187da0b..67db756cb3d 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java @@ -19,6 +19,11 @@ package org.elasticsearch.common.settings.loader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; + import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; @@ -48,42 +53,39 @@ public class YamlSettingsLoaderTests extends ESTestCase { assertThat(settings.getAsArray("test1.test3")[1], equalTo("test3-2")); } - public void testIndentation() { - final String yaml = 
"/org/elasticsearch/common/settings/loader/indentation-settings.yml"; - final SettingsException e = - expectThrows( - SettingsException.class, - () -> settingsBuilder().loadFromStream(yaml, getClass().getResourceAsStream(yaml)).build()); - assertThat(e.getMessage(), containsString("Failed to load settings")); + public void testIndentation() throws Exception { + String yaml = "/org/elasticsearch/common/settings/loader/indentation-settings.yml"; + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> { + settingsBuilder().loadFromStream(yaml, getClass().getResourceAsStream(yaml)); + }); + assertTrue(e.getMessage(), e.getMessage().contains("malformed")); } - public void testIndentationWithExplicitDocumentStart() { - final String yaml = "/org/elasticsearch/common/settings/loader/indentation-with-explicit-document-start-settings.yml"; - final SettingsException e = - expectThrows( - SettingsException.class, - () -> settingsBuilder().loadFromStream(yaml, getClass().getResourceAsStream(yaml)).build()); - assertThat(e.getMessage(), containsString("Failed to load settings")); + public void testIndentationWithExplicitDocumentStart() throws Exception { + String yaml = "/org/elasticsearch/common/settings/loader/indentation-with-explicit-document-start-settings.yml"; + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> { + settingsBuilder().loadFromStream(yaml, getClass().getResourceAsStream(yaml)); + }); + assertTrue(e.getMessage(), e.getMessage().contains("malformed")); } public void testDuplicateKeysThrowsException() { - final String yaml = "foo: bar\nfoo: baz"; - final SettingsException e = expectThrows(SettingsException.class, () -> settingsBuilder().loadFromSource(yaml).build()); + String yaml = "foo: bar\nfoo: baz"; + SettingsException e = expectThrows(SettingsException.class, () -> { + settingsBuilder().loadFromSource(yaml); + }); assertEquals(e.getCause().getClass(), ElasticsearchParseException.class); - 
assertThat( - e.toString(), - containsString("duplicate settings key [foo] " + - "found at line number [2], " + - "column number [6], " + - "previous value [bar], " + - "current value [baz]")); + String msg = e.getCause().getMessage(); + assertTrue(msg, msg.contains("duplicate settings key [foo] found")); + assertTrue(msg, msg.contains("previous value [bar], current value [baz]")); } - public void testNullValuedSettingThrowsException() { - final String yaml = "foo:"; - final ElasticsearchParseException e = - expectThrows(ElasticsearchParseException.class, () -> new YamlSettingsLoader(false).load(yaml)); - assertThat(e.toString(), containsString("null-valued setting found for key [foo] found at line number [1], column number [5]")); + public void testMissingValue() throws Exception { + Path tmp = createTempFile("test", ".yaml"); + Files.write(tmp, Collections.singletonList("foo: # missing value\n"), StandardCharsets.UTF_8); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> { + settingsBuilder().loadFromPath(tmp); + }); + assertTrue(e.getMessage(), e.getMessage().contains("null-valued setting found for key [foo]")); } - } diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java index 6468fae9397..e1d35039f02 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java @@ -79,7 +79,7 @@ public class AnalysisModuleTests extends ModuleTestCase { Collections.emptyMap(), Collections.singletonMap("myfilter", MyFilterTokenFilterFactory::new), Collections.emptyMap(), Collections.emptyMap()); } - private Settings loadFromClasspath(String path) { + private Settings loadFromClasspath(String path) throws IOException { return settingsBuilder().loadFromStream(path, getClass().getResourceAsStream(path)) 
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java index fbedf42d083..fe5b0855798 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java @@ -91,7 +91,7 @@ public class CompoundAnalysisTests extends ESTestCase { return terms; } - private Settings getJsonSettings() { + private Settings getJsonSettings() throws IOException { String json = "/org/elasticsearch/index/analysis/test1.json"; return settingsBuilder() .loadFromStream(json, getClass().getResourceAsStream(json)) @@ -100,7 +100,7 @@ public class CompoundAnalysisTests extends ESTestCase { .build(); } - private Settings getYamlSettings() { + private Settings getYamlSettings() throws IOException { String yaml = "/org/elasticsearch/index/analysis/test1.yml"; return settingsBuilder() .loadFromStream(yaml, getClass().getResourceAsStream(yaml)) diff --git a/plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTestCase.java b/plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTestCase.java deleted file mode 100644 index ad7140f5020..00000000000 --- a/plugins/discovery-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureTestCase.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cloud.azure; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.env.Environment; -import org.elasticsearch.plugin.discovery.azure.AzureDiscoveryPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ThirdParty; - -import java.util.Collection; - -/** - * Base class for Azure tests that require credentials. - *

- * You must specify {@code -Dtests.thirdparty=true -Dtests.config=/path/to/config} - * in order to run these tests. - */ -@ThirdParty -public abstract class AbstractAzureTestCase extends ESIntegTestCase { - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(readSettingsFromFile()) - .build(); - } - - @Override - protected Collection> nodePlugins() { - return pluginList(AzureDiscoveryPlugin.class); - } - - protected Settings readSettingsFromFile() { - Settings.Builder settings = Settings.builder(); - settings.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()); - - // if explicit, just load it and don't load from env - try { - if (Strings.hasText(System.getProperty("tests.config"))) { - settings.loadFromPath(PathUtils.get((System.getProperty("tests.config")))); - } else { - throw new IllegalStateException("to run integration tests, you need to set -Dtests.thirdparty=true and -Dtests.config=/path/to/elasticsearch.yml"); - } - } catch (SettingsException exception) { - throw new IllegalStateException("your test configuration file is incorrect: " + System.getProperty("tests.config"), exception); - } - return settings.build(); - } -} diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java index cc9b0897600..dc794038598 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java @@ -29,6 +29,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ThirdParty; +import java.io.IOException; import java.util.Collection; /** @@ -52,7 +53,11 @@ public abstract class AbstractAwsTestCase extends ESIntegTestCase { // if explicit, just 
load it and don't load from env try { if (Strings.hasText(System.getProperty("tests.config"))) { - settings.loadFromPath(PathUtils.get(System.getProperty("tests.config"))); + try { + settings.loadFromPath(PathUtils.get(System.getProperty("tests.config"))); + } catch (IOException e) { + throw new IllegalArgumentException("could not load aws tests config", e); + } } else { throw new IllegalStateException("to run integration tests, you need to set -Dtests.thirdparty=true and -Dtests.config=/path/to/elasticsearch.yml"); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureWithThirdPartyTestCase.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureWithThirdPartyTestCase.java index b7c2d373a58..7136befeaff 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureWithThirdPartyTestCase.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AbstractAzureWithThirdPartyTestCase.java @@ -27,6 +27,7 @@ import org.elasticsearch.plugin.repository.azure.AzureRepositoryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase.ThirdParty; +import java.io.IOException; import java.util.Collection; /** @@ -58,7 +59,11 @@ public abstract class AbstractAzureWithThirdPartyTestCase extends AbstractAzureT // if explicit, just load it and don't load from env try { if (Strings.hasText(System.getProperty("tests.config"))) { - settings.loadFromPath(PathUtils.get((System.getProperty("tests.config")))); + try { + settings.loadFromPath(PathUtils.get((System.getProperty("tests.config")))); + } catch (IOException e) { + throw new IllegalArgumentException("could not load azure tests config", e); + } } else { throw new IllegalStateException("to run integration tests, you need to set -Dtests.thirdparty=true and -Dtests.config=/path/to/elasticsearch.yml"); } diff --git 
a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java index ec8fb902d66..9d1768db58b 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTestCase.java @@ -29,6 +29,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ThirdParty; +import java.io.IOException; import java.util.Collection; /** @@ -52,7 +53,11 @@ public abstract class AbstractAwsTestCase extends ESIntegTestCase { // if explicit, just load it and don't load from env try { if (Strings.hasText(System.getProperty("tests.config"))) { - settings.loadFromPath(PathUtils.get(System.getProperty("tests.config"))); + try { + settings.loadFromPath(PathUtils.get(System.getProperty("tests.config"))); + } catch (IOException e) { + throw new IllegalArgumentException("could not load aws tests config", e); + } } else { throw new IllegalStateException("to run integration tests, you need to set -Dtests.thirdparty=true and -Dtests.config=/path/to/elasticsearch.yml"); } From 6380560dbb989242e48f68719fe6045c51f7017e Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 11 Apr 2016 14:35:37 -0700 Subject: [PATCH 02/39] Check more complete exception message for missing setting value --- .../common/settings/loader/YamlSettingsLoaderTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java index f3926ebe07e..618209cf114 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java +++ 
b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java @@ -85,6 +85,6 @@ public class YamlSettingsLoaderTests extends ESTestCase { ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> { Settings.builder().loadFromPath(tmp); }); - assertTrue(e.getMessage(), e.getMessage().contains("null-valued setting found for key [foo]")); + assertTrue(e.getMessage(), e.getMessage().contains("null-valued setting found for key [foo] found at line")); } } From 2d8030f09fd0ef71c3511a162db01d27a725161a Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Fri, 27 May 2016 13:50:38 -0700 Subject: [PATCH 03/39] Added foreach in grammar. --- .../src/main/antlr/PainlessParser.g4 | 1 + .../painless/antlr/PainlessParser.java | 697 ++++++++++-------- .../antlr/PainlessParserBaseVisitor.java | 7 + .../painless/antlr/PainlessParserVisitor.java | 7 + 4 files changed, 388 insertions(+), 324 deletions(-) diff --git a/modules/lang-painless/src/main/antlr/PainlessParser.g4 b/modules/lang-painless/src/main/antlr/PainlessParser.g4 index 5125e7b29a6..dad8db62117 100644 --- a/modules/lang-painless/src/main/antlr/PainlessParser.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessParser.g4 @@ -33,6 +33,7 @@ statement | WHILE LP expression RP ( trailer | empty ) # while | DO block WHILE LP expression RP delimiter # do | FOR LP initializer? SEMICOLON expression? SEMICOLON afterthought? 
RP ( trailer | empty ) # for + | FOR LP decltype ID COLON expression RP trailer # each | declaration delimiter # decl | CONTINUE delimiter # continue | BREAK delimiter # break diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java index 98eb16732fd..16dfb031707 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java @@ -356,6 +356,28 @@ class PainlessParser extends Parser { else return visitor.visitChildren(this); } } + public static class EachContext extends StatementContext { + public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); } + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public DecltypeContext decltype() { + return getRuleContext(DecltypeContext.class,0); + } + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public TerminalNode COLON() { return getToken(PainlessParser.COLON, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public TrailerContext trailer() { + return getRuleContext(TrailerContext.class,0); + } + public EachContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitEach(this); + else return visitor.visitChildren(this); + } + } public static class ReturnContext extends StatementContext { public TerminalNode RETURN() { return getToken(PainlessParser.RETURN, 0); } public ExpressionContext expression() { @@ -377,7 +399,7 @@ class PainlessParser extends Parser { enterRule(_localctx, 2, RULE_statement); try { int _alt; - setState(121); + 
setState(130); switch ( getInterpreter().adaptivePredict(_input,8,_ctx) ) { case 1: _localctx = new IfContext(_localctx); @@ -520,56 +542,78 @@ class PainlessParser extends Parser { } break; case 5: - _localctx = new DeclContext(_localctx); + _localctx = new EachContext(_localctx); enterOuterAlt(_localctx, 5); { setState(96); - declaration(); + match(FOR); setState(97); - delimiter(); + match(LP); + setState(98); + decltype(); + setState(99); + match(ID); + setState(100); + match(COLON); + setState(101); + expression(0); + setState(102); + match(RP); + setState(103); + trailer(); } break; case 6: - _localctx = new ContinueContext(_localctx); + _localctx = new DeclContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(99); - match(CONTINUE); - setState(100); + setState(105); + declaration(); + setState(106); delimiter(); } break; case 7: - _localctx = new BreakContext(_localctx); + _localctx = new ContinueContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(101); - match(BREAK); - setState(102); + setState(108); + match(CONTINUE); + setState(109); delimiter(); } break; case 8: - _localctx = new ReturnContext(_localctx); + _localctx = new BreakContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(103); - match(RETURN); - setState(104); - expression(0); - setState(105); + setState(110); + match(BREAK); + setState(111); delimiter(); } break; case 9: - _localctx = new TryContext(_localctx); + _localctx = new ReturnContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(107); + setState(112); + match(RETURN); + setState(113); + expression(0); + setState(114); + delimiter(); + } + break; + case 10: + _localctx = new TryContext(_localctx); + enterOuterAlt(_localctx, 10); + { + setState(116); match(TRY); - setState(108); + setState(117); block(); - setState(110); + setState(119); _errHandler.sync(this); _alt = 1; do { @@ -577,7 +621,7 @@ class PainlessParser extends Parser { case 1: { { - setState(109); + setState(118); trap(); } } @@ -585,31 
+629,31 @@ class PainlessParser extends Parser { default: throw new NoViableAltException(this); } - setState(112); + setState(121); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,7,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; - case 10: + case 11: _localctx = new ThrowContext(_localctx); - enterOuterAlt(_localctx, 10); + enterOuterAlt(_localctx, 11); { - setState(114); + setState(123); match(THROW); - setState(115); + setState(124); expression(0); - setState(116); + setState(125); delimiter(); } break; - case 11: + case 12: _localctx = new ExprContext(_localctx); - enterOuterAlt(_localctx, 11); + enterOuterAlt(_localctx, 12); { - setState(118); + setState(127); expression(0); - setState(119); + setState(128); delimiter(); } break; @@ -648,19 +692,19 @@ class PainlessParser extends Parser { TrailerContext _localctx = new TrailerContext(_ctx, getState()); enterRule(_localctx, 4, RULE_trailer); try { - setState(125); + setState(134); switch ( getInterpreter().adaptivePredict(_input,9,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(123); + setState(132); block(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(124); + setState(133); statement(); } break; @@ -704,25 +748,25 @@ class PainlessParser extends Parser { int _alt; enterOuterAlt(_localctx, 1); { - setState(127); + setState(136); match(LBRACK); - setState(131); + setState(140); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,10,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(128); + setState(137); statement(); } } } - setState(133); + setState(142); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,10,_ctx); } - setState(134); + setState(143); match(RBRACK); } } @@ -756,7 +800,7 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(136); + setState(145); match(SEMICOLON); } 
} @@ -793,19 +837,19 @@ class PainlessParser extends Parser { InitializerContext _localctx = new InitializerContext(_ctx, getState()); enterRule(_localctx, 10, RULE_initializer); try { - setState(140); + setState(149); switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(138); + setState(147); declaration(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(139); + setState(148); expression(0); } break; @@ -843,7 +887,7 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(142); + setState(151); expression(0); } } @@ -890,23 +934,23 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(144); + setState(153); decltype(); - setState(145); + setState(154); declvar(); - setState(150); + setState(159); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(146); + setState(155); match(COMMA); - setState(147); + setState(156); declvar(); } } - setState(152); + setState(161); _errHandler.sync(this); _la = _input.LA(1); } @@ -951,21 +995,21 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(153); + setState(162); match(TYPE); - setState(158); + setState(167); _errHandler.sync(this); _la = _input.LA(1); while (_la==LBRACE) { { { - setState(154); + setState(163); match(LBRACE); - setState(155); + setState(164); match(RBRACE); } } - setState(160); + setState(169); _errHandler.sync(this); _la = _input.LA(1); } @@ -1003,11 +1047,11 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(161); + setState(170); match(TYPE); - setState(162); + setState(171); match(REF); - setState(163); + setState(172); match(ID); } } @@ -1046,15 +1090,15 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(165); + setState(174); match(ID); - setState(168); + setState(177); _la = _input.LA(1); if (_la==ASSIGN) { { - setState(166); + 
setState(175); match(ASSIGN); - setState(167); + setState(176); expression(0); } } @@ -1098,17 +1142,17 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(170); + setState(179); match(CATCH); - setState(171); + setState(180); match(LP); - setState(172); + setState(181); match(TYPE); - setState(173); + setState(182); match(ID); - setState(174); + setState(183); match(RP); - setState(175); + setState(184); block(); } } @@ -1144,7 +1188,7 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(177); + setState(186); _la = _input.LA(1); if ( !(_la==EOF || _la==SEMICOLON) ) { _errHandler.recoverInline(this); @@ -1313,7 +1357,7 @@ class PainlessParser extends Parser { int _alt; enterOuterAlt(_localctx, 1); { - setState(188); + setState(197); switch ( getInterpreter().adaptivePredict(_input,15,_ctx) ) { case 1: { @@ -1321,16 +1365,16 @@ class PainlessParser extends Parser { _ctx = _localctx; _prevctx = _localctx; - setState(180); + setState(189); chain(true); - setState(181); + setState(190); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ASSIGN) | (1L << AADD) | (1L << ASUB) | (1L << AMUL) | (1L << ADIV) | (1L << AREM) | (1L << AAND) | (1L << AXOR) | (1L << AOR) | (1L << ALSH) | (1L << ARSH) | (1L << AUSH))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(182); + setState(191); expression(1); ((AssignmentContext)_localctx).s = false; } @@ -1340,14 +1384,14 @@ class PainlessParser extends Parser { _localctx = new SingleContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(185); + setState(194); ((SingleContext)_localctx).u = unary(false); ((SingleContext)_localctx).s = ((SingleContext)_localctx).u.s; } break; } _ctx.stop = _input.LT(-1); - setState(249); + setState(258); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,17,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ 
-1355,22 +1399,22 @@ class PainlessParser extends Parser { if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(247); + setState(256); switch ( getInterpreter().adaptivePredict(_input,16,_ctx) ) { case 1: { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(190); + setState(199); if (!(precpred(_ctx, 12))) throw new FailedPredicateException(this, "precpred(_ctx, 12)"); - setState(191); + setState(200); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << MUL) | (1L << DIV) | (1L << REM))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(192); + setState(201); expression(13); ((BinaryContext)_localctx).s = false; } @@ -1379,16 +1423,16 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(195); + setState(204); if (!(precpred(_ctx, 11))) throw new FailedPredicateException(this, "precpred(_ctx, 11)"); - setState(196); + setState(205); _la = _input.LA(1); if ( !(_la==ADD || _la==SUB) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(197); + setState(206); expression(12); ((BinaryContext)_localctx).s = false; } @@ -1397,16 +1441,16 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(200); + setState(209); if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)"); - setState(201); + setState(210); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LSH) | (1L << RSH) | (1L << USH))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(202); + setState(211); expression(11); 
((BinaryContext)_localctx).s = false; } @@ -1415,16 +1459,16 @@ class PainlessParser extends Parser { { _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(205); + setState(214); if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, "precpred(_ctx, 9)"); - setState(206); + setState(215); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LT) | (1L << LTE) | (1L << GT) | (1L << GTE))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(207); + setState(216); expression(10); ((CompContext)_localctx).s = false; } @@ -1433,16 +1477,16 @@ class PainlessParser extends Parser { { _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(210); + setState(219); if (!(precpred(_ctx, 8))) throw new FailedPredicateException(this, "precpred(_ctx, 8)"); - setState(211); + setState(220); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << EQ) | (1L << EQR) | (1L << NE) | (1L << NER))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(212); + setState(221); expression(9); ((CompContext)_localctx).s = false; } @@ -1451,11 +1495,11 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(215); + setState(224); if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)"); - setState(216); + setState(225); match(BWAND); - setState(217); + setState(226); expression(8); ((BinaryContext)_localctx).s = false; } @@ -1464,11 +1508,11 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, 
RULE_expression); - setState(220); + setState(229); if (!(precpred(_ctx, 6))) throw new FailedPredicateException(this, "precpred(_ctx, 6)"); - setState(221); + setState(230); match(XOR); - setState(222); + setState(231); expression(7); ((BinaryContext)_localctx).s = false; } @@ -1477,11 +1521,11 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(225); + setState(234); if (!(precpred(_ctx, 5))) throw new FailedPredicateException(this, "precpred(_ctx, 5)"); - setState(226); + setState(235); match(BWOR); - setState(227); + setState(236); expression(6); ((BinaryContext)_localctx).s = false; } @@ -1490,11 +1534,11 @@ class PainlessParser extends Parser { { _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(230); + setState(239); if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, "precpred(_ctx, 4)"); - setState(231); + setState(240); match(BOOLAND); - setState(232); + setState(241); expression(5); ((BoolContext)_localctx).s = false; } @@ -1503,11 +1547,11 @@ class PainlessParser extends Parser { { _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(235); + setState(244); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(236); + setState(245); match(BOOLOR); - setState(237); + setState(246); expression(4); ((BoolContext)_localctx).s = false; } @@ -1516,15 +1560,15 @@ class PainlessParser extends Parser { { _localctx = new ConditionalContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(240); + setState(249); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, 
"precpred(_ctx, 2)"); - setState(241); + setState(250); match(COND); - setState(242); + setState(251); ((ConditionalContext)_localctx).e0 = expression(0); - setState(243); + setState(252); match(COLON); - setState(244); + setState(253); ((ConditionalContext)_localctx).e1 = expression(2); ((ConditionalContext)_localctx).s = ((ConditionalContext)_localctx).e0.s && ((ConditionalContext)_localctx).e1.s; } @@ -1532,7 +1576,7 @@ class PainlessParser extends Parser { } } } - setState(251); + setState(260); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,17,_ctx); } @@ -1679,22 +1723,22 @@ class PainlessParser extends Parser { enterRule(_localctx, 28, RULE_unary); int _la; try { - setState(281); + setState(290); switch ( getInterpreter().adaptivePredict(_input,18,_ctx) ) { case 1: _localctx = new PreContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(252); + setState(261); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(253); + setState(262); _la = _input.LA(1); if ( !(_la==INCR || _la==DECR) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(254); + setState(263); chain(true); } break; @@ -1702,11 +1746,11 @@ class PainlessParser extends Parser { _localctx = new PostContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(255); + setState(264); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(256); + setState(265); chain(true); - setState(257); + setState(266); _la = _input.LA(1); if ( !(_la==INCR || _la==DECR) ) { _errHandler.recoverInline(this); @@ -1719,9 +1763,9 @@ class PainlessParser extends Parser { _localctx = new ReadContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(259); + setState(268); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(260); + setState(269); chain(false); } break; @@ -1729,9 +1773,9 @@ class PainlessParser extends Parser { _localctx = new NumericContext(_localctx); 
enterOuterAlt(_localctx, 4); { - setState(261); + setState(270); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(262); + setState(271); _la = _input.LA(1); if ( !(((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (OCTAL - 64)) | (1L << (HEX - 64)) | (1L << (INTEGER - 64)) | (1L << (DECIMAL - 64)))) != 0)) ) { _errHandler.recoverInline(this); @@ -1745,9 +1789,9 @@ class PainlessParser extends Parser { _localctx = new TrueContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(264); + setState(273); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(265); + setState(274); match(TRUE); ((TrueContext)_localctx).s = false; } @@ -1756,9 +1800,9 @@ class PainlessParser extends Parser { _localctx = new FalseContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(267); + setState(276); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(268); + setState(277); match(FALSE); ((FalseContext)_localctx).s = false; } @@ -1767,9 +1811,9 @@ class PainlessParser extends Parser { _localctx = new NullContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(270); + setState(279); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(271); + setState(280); match(NULL); ((NullContext)_localctx).s = false; } @@ -1778,16 +1822,16 @@ class PainlessParser extends Parser { _localctx = new OperatorContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(273); + setState(282); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(274); + setState(283); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(275); + setState(284); unary(false); } break; @@ -1795,13 +1839,13 @@ class PainlessParser extends Parser { _localctx = new 
CastContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(276); + setState(285); match(LP); - setState(277); + setState(286); decltype(); - setState(278); + setState(287); match(RP); - setState(279); + setState(288); unary(_localctx.c); } break; @@ -1910,27 +1954,27 @@ class PainlessParser extends Parser { enterRule(_localctx, 30, RULE_chain); try { int _alt; - setState(317); + setState(326); switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) { case 1: _localctx = new DynamicContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(283); + setState(292); ((DynamicContext)_localctx).p = primary(_localctx.c); - setState(287); + setState(296); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,19,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(284); + setState(293); secondary(((DynamicContext)_localctx).p.s); } } } - setState(289); + setState(298); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,19,_ctx); } @@ -1940,23 +1984,23 @@ class PainlessParser extends Parser { _localctx = new StaticContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(290); + setState(299); decltype(); - setState(291); + setState(300); dot(); - setState(295); + setState(304); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,20,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(292); + setState(301); secondary(true); } } } - setState(297); + setState(306); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,20,_ctx); } @@ -1966,11 +2010,11 @@ class PainlessParser extends Parser { _localctx = new NewarrayContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(298); + setState(307); match(NEW); - setState(299); + setState(308); match(TYPE); - setState(304); + setState(313); _errHandler.sync(this); _alt = 1; do { @@ -1978,11 +2022,11 @@ class 
PainlessParser extends Parser { case 1: { { - setState(300); + setState(309); match(LBRACE); - setState(301); + setState(310); expression(0); - setState(302); + setState(311); match(RBRACE); } } @@ -1990,29 +2034,29 @@ class PainlessParser extends Parser { default: throw new NoViableAltException(this); } - setState(306); + setState(315); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,21,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); - setState(315); + setState(324); switch ( getInterpreter().adaptivePredict(_input,23,_ctx) ) { case 1: { - setState(308); + setState(317); dot(); - setState(312); + setState(321); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,22,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(309); + setState(318); secondary(true); } } } - setState(314); + setState(323); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,22,_ctx); } @@ -2114,19 +2158,19 @@ class PainlessParser extends Parser { PrimaryContext _localctx = new PrimaryContext(_ctx, getState(), c); enterRule(_localctx, 32, RULE_primary); try { - setState(335); + setState(344); switch ( getInterpreter().adaptivePredict(_input,25,_ctx) ) { case 1: _localctx = new ExprprecContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(319); + setState(328); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(320); + setState(329); match(LP); - setState(321); + setState(330); ((ExprprecContext)_localctx).e = expression(0); - setState(322); + setState(331); match(RP); ((ExprprecContext)_localctx).s = ((ExprprecContext)_localctx).e.s; } @@ -2135,13 +2179,13 @@ class PainlessParser extends Parser { _localctx = new ChainprecContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(325); + setState(334); if (!( _localctx.c )) throw new FailedPredicateException(this, " $c "); - setState(326); + 
setState(335); match(LP); - setState(327); + setState(336); unary(true); - setState(328); + setState(337); match(RP); } break; @@ -2149,7 +2193,7 @@ class PainlessParser extends Parser { _localctx = new StringContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(330); + setState(339); match(STRING); } break; @@ -2157,7 +2201,7 @@ class PainlessParser extends Parser { _localctx = new VariableContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(331); + setState(340); match(ID); } break; @@ -2165,11 +2209,11 @@ class PainlessParser extends Parser { _localctx = new NewobjectContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(332); + setState(341); match(NEW); - setState(333); + setState(342); match(TYPE); - setState(334); + setState(343); arguments(); } break; @@ -2211,23 +2255,23 @@ class PainlessParser extends Parser { SecondaryContext _localctx = new SecondaryContext(_ctx, getState(), s); enterRule(_localctx, 34, RULE_secondary); try { - setState(341); + setState(350); switch ( getInterpreter().adaptivePredict(_input,26,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(337); + setState(346); if (!( _localctx.s )) throw new FailedPredicateException(this, " $s "); - setState(338); + setState(347); dot(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(339); + setState(348); if (!( _localctx.s )) throw new FailedPredicateException(this, " $s "); - setState(340); + setState(349); brace(); } break; @@ -2285,17 +2329,17 @@ class PainlessParser extends Parser { enterRule(_localctx, 36, RULE_dot); int _la; try { - setState(348); + setState(357); switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) { case 1: _localctx = new CallinvokeContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(343); + setState(352); match(DOT); - setState(344); + setState(353); match(DOTID); - setState(345); + setState(354); arguments(); } break; @@ -2303,9 +2347,9 @@ class PainlessParser extends Parser { _localctx = new 
FieldaccessContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(346); + setState(355); match(DOT); - setState(347); + setState(356); _la = _input.LA(1); if ( !(_la==DOTINTEGER || _la==DOTID) ) { _errHandler.recoverInline(this); @@ -2359,11 +2403,11 @@ class PainlessParser extends Parser { _localctx = new BraceaccessContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(350); + setState(359); match(LBRACE); - setState(351); + setState(360); expression(0); - setState(352); + setState(361); match(RBRACE); } } @@ -2410,34 +2454,34 @@ class PainlessParser extends Parser { enterOuterAlt(_localctx, 1); { { - setState(354); - match(LP); setState(363); + match(LP); + setState(372); switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { case 1: { - setState(355); + setState(364); argument(); - setState(360); + setState(369); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(356); + setState(365); match(COMMA); - setState(357); + setState(366); argument(); } } - setState(362); + setState(371); _errHandler.sync(this); _la = _input.LA(1); } } break; } - setState(365); + setState(374); match(RP); } } @@ -2475,19 +2519,19 @@ class PainlessParser extends Parser { ArgumentContext _localctx = new ArgumentContext(_ctx, getState()); enterRule(_localctx, 42, RULE_argument); try { - setState(369); + setState(378); switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(367); + setState(376); expression(0); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(368); + setState(377); funcref(); } break; @@ -2594,141 +2638,146 @@ class PainlessParser extends Parser { } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3M\u0176\4\2\t\2\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3M\u017f\4\2\t\2\4"+ "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ 
"\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\3\2\7\2\60\n\2\f\2"+ "\16\2\63\13\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\5\3?\n\3\3\3\3\3"+ "\3\3\3\3\3\3\3\3\5\3G\n\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3"+ "\5\3T\n\3\3\3\3\3\5\3X\n\3\3\3\3\3\5\3\\\n\3\3\3\3\3\3\3\5\3a\n\3\3\3"+ - "\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\6\3q\n\3\r\3\16\3"+ - "r\3\3\3\3\3\3\3\3\3\3\3\3\3\3\5\3|\n\3\3\4\3\4\5\4\u0080\n\4\3\5\3\5\7"+ - "\5\u0084\n\5\f\5\16\5\u0087\13\5\3\5\3\5\3\6\3\6\3\7\3\7\5\7\u008f\n\7"+ - "\3\b\3\b\3\t\3\t\3\t\3\t\7\t\u0097\n\t\f\t\16\t\u009a\13\t\3\n\3\n\3\n"+ - "\7\n\u009f\n\n\f\n\16\n\u00a2\13\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\5\f"+ - "\u00ab\n\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\17\3\17\3\17\3\17\3"+ - "\17\3\17\3\17\3\17\3\17\5\17\u00bf\n\17\3\17\3\17\3\17\3\17\3\17\3\17"+ + "\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3"+ + "\3\3\3\3\3\3\3\3\3\6\3z\n\3\r\3\16\3{\3\3\3\3\3\3\3\3\3\3\3\3\3\3\5\3"+ + "\u0085\n\3\3\4\3\4\5\4\u0089\n\4\3\5\3\5\7\5\u008d\n\5\f\5\16\5\u0090"+ + "\13\5\3\5\3\5\3\6\3\6\3\7\3\7\5\7\u0098\n\7\3\b\3\b\3\t\3\t\3\t\3\t\7"+ + "\t\u00a0\n\t\f\t\16\t\u00a3\13\t\3\n\3\n\3\n\7\n\u00a8\n\n\f\n\16\n\u00ab"+ + "\13\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\5\f\u00b4\n\f\3\r\3\r\3\r\3\r\3"+ + "\r\3\r\3\r\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\5\17"+ + "\u00c8\n\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17"+ "\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17"+ "\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17"+ "\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17"+ - "\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\3\17\7\17\u00fa\n\17\f\17\16"+ - "\17\u00fd\13\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20"+ + "\3\17\3\17\3\17\7\17\u0103\n\17\f\17\16\17\u0106\13\17\3\20\3\20\3\20"+ 
"\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20"+ - "\3\20\3\20\3\20\3\20\5\20\u011c\n\20\3\21\3\21\7\21\u0120\n\21\f\21\16"+ - "\21\u0123\13\21\3\21\3\21\3\21\7\21\u0128\n\21\f\21\16\21\u012b\13\21"+ - "\3\21\3\21\3\21\3\21\3\21\3\21\6\21\u0133\n\21\r\21\16\21\u0134\3\21\3"+ - "\21\7\21\u0139\n\21\f\21\16\21\u013c\13\21\5\21\u013e\n\21\5\21\u0140"+ - "\n\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22"+ - "\3\22\3\22\3\22\5\22\u0152\n\22\3\23\3\23\3\23\3\23\5\23\u0158\n\23\3"+ - "\24\3\24\3\24\3\24\3\24\5\24\u015f\n\24\3\25\3\25\3\25\3\25\3\26\3\26"+ - "\3\26\3\26\7\26\u0169\n\26\f\26\16\26\u016c\13\26\5\26\u016e\n\26\3\26"+ - "\3\26\3\27\3\27\5\27\u0174\n\27\3\27\2\3\34\30\2\4\6\b\n\f\16\20\22\24"+ - "\26\30\32\34\36 \"$&(*,\2\r\3\3\r\r\3\2\66A\3\2\34\36\3\2\37 \3\2!#\3"+ - "\2$\'\3\2(+\3\2\64\65\3\2BE\4\2\32\33\37 \3\2LM\u019b\2\61\3\2\2\2\4{"+ - "\3\2\2\2\6\177\3\2\2\2\b\u0081\3\2\2\2\n\u008a\3\2\2\2\f\u008e\3\2\2\2"+ - "\16\u0090\3\2\2\2\20\u0092\3\2\2\2\22\u009b\3\2\2\2\24\u00a3\3\2\2\2\26"+ - "\u00a7\3\2\2\2\30\u00ac\3\2\2\2\32\u00b3\3\2\2\2\34\u00be\3\2\2\2\36\u011b"+ - "\3\2\2\2 \u013f\3\2\2\2\"\u0151\3\2\2\2$\u0157\3\2\2\2&\u015e\3\2\2\2"+ - "(\u0160\3\2\2\2*\u0164\3\2\2\2,\u0173\3\2\2\2.\60\5\4\3\2/.\3\2\2\2\60"+ - "\63\3\2\2\2\61/\3\2\2\2\61\62\3\2\2\2\62\64\3\2\2\2\63\61\3\2\2\2\64\65"+ - "\7\2\2\3\65\3\3\2\2\2\66\67\7\16\2\2\678\7\t\2\289\5\34\17\29:\7\n\2\2"+ - ":>\5\6\4\2;<\7\17\2\2;\3\2\2\2>=\3\2\2\2?|\3\2\2"+ - "\2@A\7\20\2\2AB\7\t\2\2BC\5\34\17\2CF\7\n\2\2DG\5\6\4\2EG\5\n\6\2FD\3"+ - "\2\2\2FE\3\2\2\2G|\3\2\2\2HI\7\21\2\2IJ\5\b\5\2JK\7\20\2\2KL\7\t\2\2L"+ - "M\5\34\17\2MN\7\n\2\2NO\5\32\16\2O|\3\2\2\2PQ\7\22\2\2QS\7\t\2\2RT\5\f"+ - "\7\2SR\3\2\2\2ST\3\2\2\2TU\3\2\2\2UW\7\r\2\2VX\5\34\17\2WV\3\2\2\2WX\3"+ - "\2\2\2XY\3\2\2\2Y[\7\r\2\2Z\\\5\16\b\2[Z\3\2\2\2[\\\3\2\2\2\\]\3\2\2\2"+ - "]`\7\n\2\2^a\5\6\4\2_a\5\n\6\2`^\3\2\2\2`_\3\2\2\2a|\3\2\2\2bc\5\20\t"+ - 
"\2cd\5\32\16\2d|\3\2\2\2ef\7\23\2\2f|\5\32\16\2gh\7\24\2\2h|\5\32\16\2"+ - "ij\7\25\2\2jk\5\34\17\2kl\5\32\16\2l|\3\2\2\2mn\7\27\2\2np\5\b\5\2oq\5"+ - "\30\r\2po\3\2\2\2qr\3\2\2\2rp\3\2\2\2rs\3\2\2\2s|\3\2\2\2tu\7\31\2\2u"+ - "v\5\34\17\2vw\5\32\16\2w|\3\2\2\2xy\5\34\17\2yz\5\32\16\2z|\3\2\2\2{\66"+ - "\3\2\2\2{@\3\2\2\2{H\3\2\2\2{P\3\2\2\2{b\3\2\2\2{e\3\2\2\2{g\3\2\2\2{"+ - "i\3\2\2\2{m\3\2\2\2{t\3\2\2\2{x\3\2\2\2|\5\3\2\2\2}\u0080\5\b\5\2~\u0080"+ - "\5\4\3\2\177}\3\2\2\2\177~\3\2\2\2\u0080\7\3\2\2\2\u0081\u0085\7\5\2\2"+ - "\u0082\u0084\5\4\3\2\u0083\u0082\3\2\2\2\u0084\u0087\3\2\2\2\u0085\u0083"+ - "\3\2\2\2\u0085\u0086\3\2\2\2\u0086\u0088\3\2\2\2\u0087\u0085\3\2\2\2\u0088"+ - "\u0089\7\6\2\2\u0089\t\3\2\2\2\u008a\u008b\7\r\2\2\u008b\13\3\2\2\2\u008c"+ - "\u008f\5\20\t\2\u008d\u008f\5\34\17\2\u008e\u008c\3\2\2\2\u008e\u008d"+ - "\3\2\2\2\u008f\r\3\2\2\2\u0090\u0091\5\34\17\2\u0091\17\3\2\2\2\u0092"+ - "\u0093\5\22\n\2\u0093\u0098\5\26\f\2\u0094\u0095\7\f\2\2\u0095\u0097\5"+ - "\26\f\2\u0096\u0094\3\2\2\2\u0097\u009a\3\2\2\2\u0098\u0096\3\2\2\2\u0098"+ - "\u0099\3\2\2\2\u0099\21\3\2\2\2\u009a\u0098\3\2\2\2\u009b\u00a0\7J\2\2"+ - "\u009c\u009d\7\7\2\2\u009d\u009f\7\b\2\2\u009e\u009c\3\2\2\2\u009f\u00a2"+ - "\3\2\2\2\u00a0\u009e\3\2\2\2\u00a0\u00a1\3\2\2\2\u00a1\23\3\2\2\2\u00a2"+ - "\u00a0\3\2\2\2\u00a3\u00a4\7J\2\2\u00a4\u00a5\7\63\2\2\u00a5\u00a6\7K"+ - "\2\2\u00a6\25\3\2\2\2\u00a7\u00aa\7K\2\2\u00a8\u00a9\7\66\2\2\u00a9\u00ab"+ - "\5\34\17\2\u00aa\u00a8\3\2\2\2\u00aa\u00ab\3\2\2\2\u00ab\27\3\2\2\2\u00ac"+ - "\u00ad\7\30\2\2\u00ad\u00ae\7\t\2\2\u00ae\u00af\7J\2\2\u00af\u00b0\7K"+ - "\2\2\u00b0\u00b1\7\n\2\2\u00b1\u00b2\5\b\5\2\u00b2\31\3\2\2\2\u00b3\u00b4"+ - "\t\2\2\2\u00b4\33\3\2\2\2\u00b5\u00b6\b\17\1\2\u00b6\u00b7\5 \21\2\u00b7"+ - "\u00b8\t\3\2\2\u00b8\u00b9\5\34\17\3\u00b9\u00ba\b\17\1\2\u00ba\u00bf"+ - "\3\2\2\2\u00bb\u00bc\5\36\20\2\u00bc\u00bd\b\17\1\2\u00bd\u00bf\3\2\2"+ - "\2\u00be\u00b5\3\2\2\2\u00be\u00bb\3\2\2\2\u00bf\u00fb\3\2\2\2\u00c0\u00c1"+ 
- "\f\16\2\2\u00c1\u00c2\t\4\2\2\u00c2\u00c3\5\34\17\17\u00c3\u00c4\b\17"+ - "\1\2\u00c4\u00fa\3\2\2\2\u00c5\u00c6\f\r\2\2\u00c6\u00c7\t\5\2\2\u00c7"+ - "\u00c8\5\34\17\16\u00c8\u00c9\b\17\1\2\u00c9\u00fa\3\2\2\2\u00ca\u00cb"+ - "\f\f\2\2\u00cb\u00cc\t\6\2\2\u00cc\u00cd\5\34\17\r\u00cd\u00ce\b\17\1"+ - "\2\u00ce\u00fa\3\2\2\2\u00cf\u00d0\f\13\2\2\u00d0\u00d1\t\7\2\2\u00d1"+ - "\u00d2\5\34\17\f\u00d2\u00d3\b\17\1\2\u00d3\u00fa\3\2\2\2\u00d4\u00d5"+ - "\f\n\2\2\u00d5\u00d6\t\b\2\2\u00d6\u00d7\5\34\17\13\u00d7\u00d8\b\17\1"+ - "\2\u00d8\u00fa\3\2\2\2\u00d9\u00da\f\t\2\2\u00da\u00db\7,\2\2\u00db\u00dc"+ - "\5\34\17\n\u00dc\u00dd\b\17\1\2\u00dd\u00fa\3\2\2\2\u00de\u00df\f\b\2"+ - "\2\u00df\u00e0\7-\2\2\u00e0\u00e1\5\34\17\t\u00e1\u00e2\b\17\1\2\u00e2"+ - "\u00fa\3\2\2\2\u00e3\u00e4\f\7\2\2\u00e4\u00e5\7.\2\2\u00e5\u00e6\5\34"+ - "\17\b\u00e6\u00e7\b\17\1\2\u00e7\u00fa\3\2\2\2\u00e8\u00e9\f\6\2\2\u00e9"+ - "\u00ea\7/\2\2\u00ea\u00eb\5\34\17\7\u00eb\u00ec\b\17\1\2\u00ec\u00fa\3"+ - "\2\2\2\u00ed\u00ee\f\5\2\2\u00ee\u00ef\7\60\2\2\u00ef\u00f0\5\34\17\6"+ - "\u00f0\u00f1\b\17\1\2\u00f1\u00fa\3\2\2\2\u00f2\u00f3\f\4\2\2\u00f3\u00f4"+ - "\7\61\2\2\u00f4\u00f5\5\34\17\2\u00f5\u00f6\7\62\2\2\u00f6\u00f7\5\34"+ - "\17\4\u00f7\u00f8\b\17\1\2\u00f8\u00fa\3\2\2\2\u00f9\u00c0\3\2\2\2\u00f9"+ - "\u00c5\3\2\2\2\u00f9\u00ca\3\2\2\2\u00f9\u00cf\3\2\2\2\u00f9\u00d4\3\2"+ - "\2\2\u00f9\u00d9\3\2\2\2\u00f9\u00de\3\2\2\2\u00f9\u00e3\3\2\2\2\u00f9"+ - "\u00e8\3\2\2\2\u00f9\u00ed\3\2\2\2\u00f9\u00f2\3\2\2\2\u00fa\u00fd\3\2"+ - "\2\2\u00fb\u00f9\3\2\2\2\u00fb\u00fc\3\2\2\2\u00fc\35\3\2\2\2\u00fd\u00fb"+ - "\3\2\2\2\u00fe\u00ff\6\20\16\3\u00ff\u0100\t\t\2\2\u0100\u011c\5 \21\2"+ - "\u0101\u0102\6\20\17\3\u0102\u0103\5 \21\2\u0103\u0104\t\t\2\2\u0104\u011c"+ - "\3\2\2\2\u0105\u0106\6\20\20\3\u0106\u011c\5 \21\2\u0107\u0108\6\20\21"+ - "\3\u0108\u0109\t\n\2\2\u0109\u011c\b\20\1\2\u010a\u010b\6\20\22\3\u010b"+ - "\u010c\7G\2\2\u010c\u011c\b\20\1\2\u010d\u010e\6\20\23\3\u010e\u010f\7"+ - 
"H\2\2\u010f\u011c\b\20\1\2\u0110\u0111\6\20\24\3\u0111\u0112\7I\2\2\u0112"+ - "\u011c\b\20\1\2\u0113\u0114\6\20\25\3\u0114\u0115\t\13\2\2\u0115\u011c"+ - "\5\36\20\2\u0116\u0117\7\t\2\2\u0117\u0118\5\22\n\2\u0118\u0119\7\n\2"+ - "\2\u0119\u011a\5\36\20\2\u011a\u011c\3\2\2\2\u011b\u00fe\3\2\2\2\u011b"+ - "\u0101\3\2\2\2\u011b\u0105\3\2\2\2\u011b\u0107\3\2\2\2\u011b\u010a\3\2"+ - "\2\2\u011b\u010d\3\2\2\2\u011b\u0110\3\2\2\2\u011b\u0113\3\2\2\2\u011b"+ - "\u0116\3\2\2\2\u011c\37\3\2\2\2\u011d\u0121\5\"\22\2\u011e\u0120\5$\23"+ - "\2\u011f\u011e\3\2\2\2\u0120\u0123\3\2\2\2\u0121\u011f\3\2\2\2\u0121\u0122"+ - "\3\2\2\2\u0122\u0140\3\2\2\2\u0123\u0121\3\2\2\2\u0124\u0125\5\22\n\2"+ - "\u0125\u0129\5&\24\2\u0126\u0128\5$\23\2\u0127\u0126\3\2\2\2\u0128\u012b"+ - "\3\2\2\2\u0129\u0127\3\2\2\2\u0129\u012a\3\2\2\2\u012a\u0140\3\2\2\2\u012b"+ - "\u0129\3\2\2\2\u012c\u012d\7\26\2\2\u012d\u0132\7J\2\2\u012e\u012f\7\7"+ - "\2\2\u012f\u0130\5\34\17\2\u0130\u0131\7\b\2\2\u0131\u0133\3\2\2\2\u0132"+ - "\u012e\3\2\2\2\u0133\u0134\3\2\2\2\u0134\u0132\3\2\2\2\u0134\u0135\3\2"+ - "\2\2\u0135\u013d\3\2\2\2\u0136\u013a\5&\24\2\u0137\u0139\5$\23\2\u0138"+ - "\u0137\3\2\2\2\u0139\u013c\3\2\2\2\u013a\u0138\3\2\2\2\u013a\u013b\3\2"+ - "\2\2\u013b\u013e\3\2\2\2\u013c\u013a\3\2\2\2\u013d\u0136\3\2\2\2\u013d"+ - "\u013e\3\2\2\2\u013e\u0140\3\2\2\2\u013f\u011d\3\2\2\2\u013f\u0124\3\2"+ - "\2\2\u013f\u012c\3\2\2\2\u0140!\3\2\2\2\u0141\u0142\6\22\26\3\u0142\u0143"+ - "\7\t\2\2\u0143\u0144\5\34\17\2\u0144\u0145\7\n\2\2\u0145\u0146\b\22\1"+ - "\2\u0146\u0152\3\2\2\2\u0147\u0148\6\22\27\3\u0148\u0149\7\t\2\2\u0149"+ - "\u014a\5\36\20\2\u014a\u014b\7\n\2\2\u014b\u0152\3\2\2\2\u014c\u0152\7"+ - "F\2\2\u014d\u0152\7K\2\2\u014e\u014f\7\26\2\2\u014f\u0150\7J\2\2\u0150"+ - "\u0152\5*\26\2\u0151\u0141\3\2\2\2\u0151\u0147\3\2\2\2\u0151\u014c\3\2"+ - "\2\2\u0151\u014d\3\2\2\2\u0151\u014e\3\2\2\2\u0152#\3\2\2\2\u0153\u0154"+ - "\6\23\30\3\u0154\u0158\5&\24\2\u0155\u0156\6\23\31\3\u0156\u0158\5(\25"+ - 
"\2\u0157\u0153\3\2\2\2\u0157\u0155\3\2\2\2\u0158%\3\2\2\2\u0159\u015a"+ - "\7\13\2\2\u015a\u015b\7M\2\2\u015b\u015f\5*\26\2\u015c\u015d\7\13\2\2"+ - "\u015d\u015f\t\f\2\2\u015e\u0159\3\2\2\2\u015e\u015c\3\2\2\2\u015f\'\3"+ - "\2\2\2\u0160\u0161\7\7\2\2\u0161\u0162\5\34\17\2\u0162\u0163\7\b\2\2\u0163"+ - ")\3\2\2\2\u0164\u016d\7\t\2\2\u0165\u016a\5,\27\2\u0166\u0167\7\f\2\2"+ - "\u0167\u0169\5,\27\2\u0168\u0166\3\2\2\2\u0169\u016c\3\2\2\2\u016a\u0168"+ - "\3\2\2\2\u016a\u016b\3\2\2\2\u016b\u016e\3\2\2\2\u016c\u016a\3\2\2\2\u016d"+ - "\u0165\3\2\2\2\u016d\u016e\3\2\2\2\u016e\u016f\3\2\2\2\u016f\u0170\7\n"+ - "\2\2\u0170+\3\2\2\2\u0171\u0174\5\34\17\2\u0172\u0174\5\24\13\2\u0173"+ - "\u0171\3\2\2\2\u0173\u0172\3\2\2\2\u0174-\3\2\2\2!\61>FSW[`r{\177\u0085"+ - "\u008e\u0098\u00a0\u00aa\u00be\u00f9\u00fb\u011b\u0121\u0129\u0134\u013a"+ - "\u013d\u013f\u0151\u0157\u015e\u016a\u016d\u0173"; + "\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\5\20\u0125"+ + "\n\20\3\21\3\21\7\21\u0129\n\21\f\21\16\21\u012c\13\21\3\21\3\21\3\21"+ + "\7\21\u0131\n\21\f\21\16\21\u0134\13\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\6\21\u013c\n\21\r\21\16\21\u013d\3\21\3\21\7\21\u0142\n\21\f\21\16\21"+ + "\u0145\13\21\5\21\u0147\n\21\5\21\u0149\n\21\3\22\3\22\3\22\3\22\3\22"+ + "\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\5\22\u015b\n\22"+ + "\3\23\3\23\3\23\3\23\5\23\u0161\n\23\3\24\3\24\3\24\3\24\3\24\5\24\u0168"+ + "\n\24\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\7\26\u0172\n\26\f\26\16"+ + "\26\u0175\13\26\5\26\u0177\n\26\3\26\3\26\3\27\3\27\5\27\u017d\n\27\3"+ + "\27\2\3\34\30\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,\2\r\3\3\r"+ + "\r\3\2\66A\3\2\34\36\3\2\37 \3\2!#\3\2$\'\3\2(+\3\2\64\65\3\2BE\4\2\32"+ + "\33\37 \3\2LM\u01a5\2\61\3\2\2\2\4\u0084\3\2\2\2\6\u0088\3\2\2\2\b\u008a"+ + "\3\2\2\2\n\u0093\3\2\2\2\f\u0097\3\2\2\2\16\u0099\3\2\2\2\20\u009b\3\2"+ + "\2\2\22\u00a4\3\2\2\2\24\u00ac\3\2\2\2\26\u00b0\3\2\2\2\30\u00b5\3\2\2"+ + 
"\2\32\u00bc\3\2\2\2\34\u00c7\3\2\2\2\36\u0124\3\2\2\2 \u0148\3\2\2\2\""+ + "\u015a\3\2\2\2$\u0160\3\2\2\2&\u0167\3\2\2\2(\u0169\3\2\2\2*\u016d\3\2"+ + "\2\2,\u017c\3\2\2\2.\60\5\4\3\2/.\3\2\2\2\60\63\3\2\2\2\61/\3\2\2\2\61"+ + "\62\3\2\2\2\62\64\3\2\2\2\63\61\3\2\2\2\64\65\7\2\2\3\65\3\3\2\2\2\66"+ + "\67\7\16\2\2\678\7\t\2\289\5\34\17\29:\7\n\2\2:>\5\6\4\2;<\7\17\2\2;\3\2\2\2>=\3\2\2\2?\u0085\3\2\2\2@A\7\20\2\2AB\7\t"+ + "\2\2BC\5\34\17\2CF\7\n\2\2DG\5\6\4\2EG\5\n\6\2FD\3\2\2\2FE\3\2\2\2G\u0085"+ + "\3\2\2\2HI\7\21\2\2IJ\5\b\5\2JK\7\20\2\2KL\7\t\2\2LM\5\34\17\2MN\7\n\2"+ + "\2NO\5\32\16\2O\u0085\3\2\2\2PQ\7\22\2\2QS\7\t\2\2RT\5\f\7\2SR\3\2\2\2"+ + "ST\3\2\2\2TU\3\2\2\2UW\7\r\2\2VX\5\34\17\2WV\3\2\2\2WX\3\2\2\2XY\3\2\2"+ + "\2Y[\7\r\2\2Z\\\5\16\b\2[Z\3\2\2\2[\\\3\2\2\2\\]\3\2\2\2]`\7\n\2\2^a\5"+ + "\6\4\2_a\5\n\6\2`^\3\2\2\2`_\3\2\2\2a\u0085\3\2\2\2bc\7\22\2\2cd\7\t\2"+ + "\2de\5\22\n\2ef\7K\2\2fg\7\62\2\2gh\5\34\17\2hi\7\n\2\2ij\5\6\4\2j\u0085"+ + "\3\2\2\2kl\5\20\t\2lm\5\32\16\2m\u0085\3\2\2\2no\7\23\2\2o\u0085\5\32"+ + "\16\2pq\7\24\2\2q\u0085\5\32\16\2rs\7\25\2\2st\5\34\17\2tu\5\32\16\2u"+ + "\u0085\3\2\2\2vw\7\27\2\2wy\5\b\5\2xz\5\30\r\2yx\3\2\2\2z{\3\2\2\2{y\3"+ + "\2\2\2{|\3\2\2\2|\u0085\3\2\2\2}~\7\31\2\2~\177\5\34\17\2\177\u0080\5"+ + "\32\16\2\u0080\u0085\3\2\2\2\u0081\u0082\5\34\17\2\u0082\u0083\5\32\16"+ + "\2\u0083\u0085\3\2\2\2\u0084\66\3\2\2\2\u0084@\3\2\2\2\u0084H\3\2\2\2"+ + "\u0084P\3\2\2\2\u0084b\3\2\2\2\u0084k\3\2\2\2\u0084n\3\2\2\2\u0084p\3"+ + "\2\2\2\u0084r\3\2\2\2\u0084v\3\2\2\2\u0084}\3\2\2\2\u0084\u0081\3\2\2"+ + "\2\u0085\5\3\2\2\2\u0086\u0089\5\b\5\2\u0087\u0089\5\4\3\2\u0088\u0086"+ + "\3\2\2\2\u0088\u0087\3\2\2\2\u0089\7\3\2\2\2\u008a\u008e\7\5\2\2\u008b"+ + "\u008d\5\4\3\2\u008c\u008b\3\2\2\2\u008d\u0090\3\2\2\2\u008e\u008c\3\2"+ + "\2\2\u008e\u008f\3\2\2\2\u008f\u0091\3\2\2\2\u0090\u008e\3\2\2\2\u0091"+ + "\u0092\7\6\2\2\u0092\t\3\2\2\2\u0093\u0094\7\r\2\2\u0094\13\3\2\2\2\u0095"+ + 
"\u0098\5\20\t\2\u0096\u0098\5\34\17\2\u0097\u0095\3\2\2\2\u0097\u0096"+ + "\3\2\2\2\u0098\r\3\2\2\2\u0099\u009a\5\34\17\2\u009a\17\3\2\2\2\u009b"+ + "\u009c\5\22\n\2\u009c\u00a1\5\26\f\2\u009d\u009e\7\f\2\2\u009e\u00a0\5"+ + "\26\f\2\u009f\u009d\3\2\2\2\u00a0\u00a3\3\2\2\2\u00a1\u009f\3\2\2\2\u00a1"+ + "\u00a2\3\2\2\2\u00a2\21\3\2\2\2\u00a3\u00a1\3\2\2\2\u00a4\u00a9\7J\2\2"+ + "\u00a5\u00a6\7\7\2\2\u00a6\u00a8\7\b\2\2\u00a7\u00a5\3\2\2\2\u00a8\u00ab"+ + "\3\2\2\2\u00a9\u00a7\3\2\2\2\u00a9\u00aa\3\2\2\2\u00aa\23\3\2\2\2\u00ab"+ + "\u00a9\3\2\2\2\u00ac\u00ad\7J\2\2\u00ad\u00ae\7\63\2\2\u00ae\u00af\7K"+ + "\2\2\u00af\25\3\2\2\2\u00b0\u00b3\7K\2\2\u00b1\u00b2\7\66\2\2\u00b2\u00b4"+ + "\5\34\17\2\u00b3\u00b1\3\2\2\2\u00b3\u00b4\3\2\2\2\u00b4\27\3\2\2\2\u00b5"+ + "\u00b6\7\30\2\2\u00b6\u00b7\7\t\2\2\u00b7\u00b8\7J\2\2\u00b8\u00b9\7K"+ + "\2\2\u00b9\u00ba\7\n\2\2\u00ba\u00bb\5\b\5\2\u00bb\31\3\2\2\2\u00bc\u00bd"+ + "\t\2\2\2\u00bd\33\3\2\2\2\u00be\u00bf\b\17\1\2\u00bf\u00c0\5 \21\2\u00c0"+ + "\u00c1\t\3\2\2\u00c1\u00c2\5\34\17\3\u00c2\u00c3\b\17\1\2\u00c3\u00c8"+ + "\3\2\2\2\u00c4\u00c5\5\36\20\2\u00c5\u00c6\b\17\1\2\u00c6\u00c8\3\2\2"+ + "\2\u00c7\u00be\3\2\2\2\u00c7\u00c4\3\2\2\2\u00c8\u0104\3\2\2\2\u00c9\u00ca"+ + "\f\16\2\2\u00ca\u00cb\t\4\2\2\u00cb\u00cc\5\34\17\17\u00cc\u00cd\b\17"+ + "\1\2\u00cd\u0103\3\2\2\2\u00ce\u00cf\f\r\2\2\u00cf\u00d0\t\5\2\2\u00d0"+ + "\u00d1\5\34\17\16\u00d1\u00d2\b\17\1\2\u00d2\u0103\3\2\2\2\u00d3\u00d4"+ + "\f\f\2\2\u00d4\u00d5\t\6\2\2\u00d5\u00d6\5\34\17\r\u00d6\u00d7\b\17\1"+ + "\2\u00d7\u0103\3\2\2\2\u00d8\u00d9\f\13\2\2\u00d9\u00da\t\7\2\2\u00da"+ + "\u00db\5\34\17\f\u00db\u00dc\b\17\1\2\u00dc\u0103\3\2\2\2\u00dd\u00de"+ + "\f\n\2\2\u00de\u00df\t\b\2\2\u00df\u00e0\5\34\17\13\u00e0\u00e1\b\17\1"+ + "\2\u00e1\u0103\3\2\2\2\u00e2\u00e3\f\t\2\2\u00e3\u00e4\7,\2\2\u00e4\u00e5"+ + "\5\34\17\n\u00e5\u00e6\b\17\1\2\u00e6\u0103\3\2\2\2\u00e7\u00e8\f\b\2"+ + "\2\u00e8\u00e9\7-\2\2\u00e9\u00ea\5\34\17\t\u00ea\u00eb\b\17\1\2\u00eb"+ + 
"\u0103\3\2\2\2\u00ec\u00ed\f\7\2\2\u00ed\u00ee\7.\2\2\u00ee\u00ef\5\34"+ + "\17\b\u00ef\u00f0\b\17\1\2\u00f0\u0103\3\2\2\2\u00f1\u00f2\f\6\2\2\u00f2"+ + "\u00f3\7/\2\2\u00f3\u00f4\5\34\17\7\u00f4\u00f5\b\17\1\2\u00f5\u0103\3"+ + "\2\2\2\u00f6\u00f7\f\5\2\2\u00f7\u00f8\7\60\2\2\u00f8\u00f9\5\34\17\6"+ + "\u00f9\u00fa\b\17\1\2\u00fa\u0103\3\2\2\2\u00fb\u00fc\f\4\2\2\u00fc\u00fd"+ + "\7\61\2\2\u00fd\u00fe\5\34\17\2\u00fe\u00ff\7\62\2\2\u00ff\u0100\5\34"+ + "\17\4\u0100\u0101\b\17\1\2\u0101\u0103\3\2\2\2\u0102\u00c9\3\2\2\2\u0102"+ + "\u00ce\3\2\2\2\u0102\u00d3\3\2\2\2\u0102\u00d8\3\2\2\2\u0102\u00dd\3\2"+ + "\2\2\u0102\u00e2\3\2\2\2\u0102\u00e7\3\2\2\2\u0102\u00ec\3\2\2\2\u0102"+ + "\u00f1\3\2\2\2\u0102\u00f6\3\2\2\2\u0102\u00fb\3\2\2\2\u0103\u0106\3\2"+ + "\2\2\u0104\u0102\3\2\2\2\u0104\u0105\3\2\2\2\u0105\35\3\2\2\2\u0106\u0104"+ + "\3\2\2\2\u0107\u0108\6\20\16\3\u0108\u0109\t\t\2\2\u0109\u0125\5 \21\2"+ + "\u010a\u010b\6\20\17\3\u010b\u010c\5 \21\2\u010c\u010d\t\t\2\2\u010d\u0125"+ + "\3\2\2\2\u010e\u010f\6\20\20\3\u010f\u0125\5 \21\2\u0110\u0111\6\20\21"+ + "\3\u0111\u0112\t\n\2\2\u0112\u0125\b\20\1\2\u0113\u0114\6\20\22\3\u0114"+ + "\u0115\7G\2\2\u0115\u0125\b\20\1\2\u0116\u0117\6\20\23\3\u0117\u0118\7"+ + "H\2\2\u0118\u0125\b\20\1\2\u0119\u011a\6\20\24\3\u011a\u011b\7I\2\2\u011b"+ + "\u0125\b\20\1\2\u011c\u011d\6\20\25\3\u011d\u011e\t\13\2\2\u011e\u0125"+ + "\5\36\20\2\u011f\u0120\7\t\2\2\u0120\u0121\5\22\n\2\u0121\u0122\7\n\2"+ + "\2\u0122\u0123\5\36\20\2\u0123\u0125\3\2\2\2\u0124\u0107\3\2\2\2\u0124"+ + "\u010a\3\2\2\2\u0124\u010e\3\2\2\2\u0124\u0110\3\2\2\2\u0124\u0113\3\2"+ + "\2\2\u0124\u0116\3\2\2\2\u0124\u0119\3\2\2\2\u0124\u011c\3\2\2\2\u0124"+ + "\u011f\3\2\2\2\u0125\37\3\2\2\2\u0126\u012a\5\"\22\2\u0127\u0129\5$\23"+ + "\2\u0128\u0127\3\2\2\2\u0129\u012c\3\2\2\2\u012a\u0128\3\2\2\2\u012a\u012b"+ + "\3\2\2\2\u012b\u0149\3\2\2\2\u012c\u012a\3\2\2\2\u012d\u012e\5\22\n\2"+ + "\u012e\u0132\5&\24\2\u012f\u0131\5$\23\2\u0130\u012f\3\2\2\2\u0131\u0134"+ + 
"\3\2\2\2\u0132\u0130\3\2\2\2\u0132\u0133\3\2\2\2\u0133\u0149\3\2\2\2\u0134"+ + "\u0132\3\2\2\2\u0135\u0136\7\26\2\2\u0136\u013b\7J\2\2\u0137\u0138\7\7"+ + "\2\2\u0138\u0139\5\34\17\2\u0139\u013a\7\b\2\2\u013a\u013c\3\2\2\2\u013b"+ + "\u0137\3\2\2\2\u013c\u013d\3\2\2\2\u013d\u013b\3\2\2\2\u013d\u013e\3\2"+ + "\2\2\u013e\u0146\3\2\2\2\u013f\u0143\5&\24\2\u0140\u0142\5$\23\2\u0141"+ + "\u0140\3\2\2\2\u0142\u0145\3\2\2\2\u0143\u0141\3\2\2\2\u0143\u0144\3\2"+ + "\2\2\u0144\u0147\3\2\2\2\u0145\u0143\3\2\2\2\u0146\u013f\3\2\2\2\u0146"+ + "\u0147\3\2\2\2\u0147\u0149\3\2\2\2\u0148\u0126\3\2\2\2\u0148\u012d\3\2"+ + "\2\2\u0148\u0135\3\2\2\2\u0149!\3\2\2\2\u014a\u014b\6\22\26\3\u014b\u014c"+ + "\7\t\2\2\u014c\u014d\5\34\17\2\u014d\u014e\7\n\2\2\u014e\u014f\b\22\1"+ + "\2\u014f\u015b\3\2\2\2\u0150\u0151\6\22\27\3\u0151\u0152\7\t\2\2\u0152"+ + "\u0153\5\36\20\2\u0153\u0154\7\n\2\2\u0154\u015b\3\2\2\2\u0155\u015b\7"+ + "F\2\2\u0156\u015b\7K\2\2\u0157\u0158\7\26\2\2\u0158\u0159\7J\2\2\u0159"+ + "\u015b\5*\26\2\u015a\u014a\3\2\2\2\u015a\u0150\3\2\2\2\u015a\u0155\3\2"+ + "\2\2\u015a\u0156\3\2\2\2\u015a\u0157\3\2\2\2\u015b#\3\2\2\2\u015c\u015d"+ + "\6\23\30\3\u015d\u0161\5&\24\2\u015e\u015f\6\23\31\3\u015f\u0161\5(\25"+ + "\2\u0160\u015c\3\2\2\2\u0160\u015e\3\2\2\2\u0161%\3\2\2\2\u0162\u0163"+ + "\7\13\2\2\u0163\u0164\7M\2\2\u0164\u0168\5*\26\2\u0165\u0166\7\13\2\2"+ + "\u0166\u0168\t\f\2\2\u0167\u0162\3\2\2\2\u0167\u0165\3\2\2\2\u0168\'\3"+ + "\2\2\2\u0169\u016a\7\7\2\2\u016a\u016b\5\34\17\2\u016b\u016c\7\b\2\2\u016c"+ + ")\3\2\2\2\u016d\u0176\7\t\2\2\u016e\u0173\5,\27\2\u016f\u0170\7\f\2\2"+ + "\u0170\u0172\5,\27\2\u0171\u016f\3\2\2\2\u0172\u0175\3\2\2\2\u0173\u0171"+ + "\3\2\2\2\u0173\u0174\3\2\2\2\u0174\u0177\3\2\2\2\u0175\u0173\3\2\2\2\u0176"+ + "\u016e\3\2\2\2\u0176\u0177\3\2\2\2\u0177\u0178\3\2\2\2\u0178\u0179\7\n"+ + "\2\2\u0179+\3\2\2\2\u017a\u017d\5\34\17\2\u017b\u017d\5\24\13\2\u017c"+ + "\u017a\3\2\2\2\u017c\u017b\3\2\2\2\u017d-\3\2\2\2!\61>FSW[`{\u0084\u0088"+ + 
"\u008e\u0097\u00a1\u00a9\u00b3\u00c7\u0102\u0104\u0124\u012a\u0132\u013d"+ + "\u0143\u0146\u0148\u015a\u0160\u0167\u0173\u0176\u017c"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java index b4429fc0841..f116f087c5c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java @@ -46,6 +46,13 @@ class PainlessParserBaseVisitor extends AbstractParseTreeVisitor implement * {@link #visitChildren} on {@code ctx}.

*/ @Override public T visitFor(PainlessParser.ForContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitEach(PainlessParser.EachContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java index 6dc511cb4e9..f0943743ef8 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java @@ -44,6 +44,13 @@ interface PainlessParserVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitFor(PainlessParser.ForContext ctx); + /** + * Visit a parse tree produced by the {@code each} + * labeled alternative in {@link PainlessParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitEach(PainlessParser.EachContext ctx); /** * Visit a parse tree produced by the {@code decl} * labeled alternative in {@link PainlessParser#statement}. From f5be0982d9c8ec61c1c8189987dc643ba9f8571e Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Fri, 27 May 2016 17:41:31 -0700 Subject: [PATCH 04/39] Added initial infrastructure to allow for each to be made. 
--- .../src/main/antlr/PainlessLexer.g4 | 4 + .../org/elasticsearch/painless/Compiler.java | 7 +- .../elasticsearch/painless/ScriptImpl.java | 10 +- .../painless/node/AExpression.java | 52 ++++++++-- .../painless/node/AStatement.java | 5 +- .../elasticsearch/painless/node/EBinary.java | 1 + .../elasticsearch/painless/node/EChain.java | 94 +++++++++++++------ .../elasticsearch/painless/node/EComp.java | 1 + .../painless/node/EConditional.java | 1 + .../elasticsearch/painless/node/EUnary.java | 1 + .../elasticsearch/painless/node/LBrace.java | 1 - .../elasticsearch/painless/node/LCall.java | 1 + .../painless/node/LDefArray.java | 2 + .../elasticsearch/painless/node/LDefCall.java | 1 + .../painless/node/LDefField.java | 2 + .../elasticsearch/painless/node/LField.java | 2 + .../painless/node/LListShortcut.java | 2 + .../painless/node/LMapShortcut.java | 2 + .../painless/node/LNewArray.java | 1 + .../painless/node/LShortcut.java | 2 + .../elasticsearch/painless/node/SBlock.java | 21 +++-- .../elasticsearch/painless/node/SBreak.java | 4 +- .../elasticsearch/painless/node/SCatch.java | 9 +- .../painless/node/SContinue.java | 4 +- .../painless/node/SDeclBlock.java | 15 +-- .../painless/node/SDeclaration.java | 5 +- .../org/elasticsearch/painless/node/SDo.java | 9 +- .../painless/node/SExpression.java | 4 +- .../org/elasticsearch/painless/node/SFor.java | 13 ++- .../org/elasticsearch/painless/node/SIf.java | 9 +- .../elasticsearch/painless/node/SIfElse.java | 13 ++- .../elasticsearch/painless/node/SReturn.java | 4 +- .../elasticsearch/painless/node/SSource.java | 15 +-- .../elasticsearch/painless/node/SThrow.java | 4 +- .../org/elasticsearch/painless/node/STry.java | 20 ++-- .../elasticsearch/painless/node/SWhile.java | 9 +- 36 files changed, 250 insertions(+), 100 deletions(-) diff --git a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 index 6e34d35111d..4ca5778ad3c 100644 --- 
a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 @@ -32,6 +32,10 @@ LBRACE: '['; RBRACE: ']'; LP: '('; RP: ')'; +// We switch modes after a dot to ensure there are not conflicts +// between shortcuts and decimal values. Without the mode switch +// shortcuts such as id.0.0 will fail because 0.0 will be interpreted +// as a decimal value instead of two individual list-style shortcuts. DOT: '.' -> mode(AFTER_DOT); COMMA: ','; SEMICOLON: ';'; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java index 36d41056013..961c5ab8c2e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java @@ -104,17 +104,16 @@ final class Compiler { SSource root = Walker.buildPainlessTree(source, reserved, settings); Variables variables = Analyzer.analyze(reserved, root); BitSet expressions = new BitSet(source.length()); - byte[] bytes = Writer.write(settings, name, source, variables, root, expressions); + try { Class clazz = loader.define(CLASS_NAME, bytes); - java.lang.reflect.Constructor constructor = + java.lang.reflect.Constructor constructor = clazz.getConstructor(String.class, String.class, BitSet.class); return constructor.newInstance(name, source, expressions); } catch (Exception exception) { // Catch everything to let the user know this is something caused internally. 
- throw new IllegalStateException( - "An internal error occurred attempting to define the script [" + name + "].", exception); + throw new IllegalStateException("An internal error occurred attempting to define the script [" + name + "].", exception); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java index 83ae7b664c1..c601bfc2064 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptImpl.java @@ -123,7 +123,13 @@ final class ScriptImpl implements ExecutableScript, LeafSearchScript { throw convertToScriptException(t); } } - + + /** + * Adds stack trace and other useful information to exceptiosn thrown + * from a Painless script. + * @param t The throwable to build an exception around. + * @return The generated ScriptException. + */ private ScriptException convertToScriptException(Throwable t) { // create a script stack: this is just the script portion List scriptStack = new ArrayList<>(); @@ -169,7 +175,7 @@ final class ScriptImpl implements ExecutableScript, LeafSearchScript { } throw new ScriptException("runtime error", t, scriptStack, name, PainlessScriptEngineService.NAME); } - + /** returns true for methods that are part of the runtime */ private static boolean shouldFilter(StackTraceElement element) { return element.getClassName().startsWith("org.elasticsearch.painless.") || diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java index c46181817e2..f82e1c2da81 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java @@ -122,9 +122,19 @@ public abstract class 
AExpression extends ANode { if (cast == null) { if (constant == null || this instanceof EConstant) { + // For the case where a cast is not required and a constant is not set + // or the node is already an EConstant no changes are required to the tree. + return this; } else { - final EConstant econstant = new EConstant(line, offset, location, constant); + // For the case where a cast is not required but a + // constant is set, an EConstant replaces this node + // with the constant copied from this node. Note that + // for constants output data does not need to be copied + // from this node because the output data for the EConstant + // will already be the same. + + EConstant econstant = new EConstant(line, offset, location, constant); econstant.analyze(variables); if (!expected.equals(econstant.actual)) { @@ -135,7 +145,12 @@ public abstract class AExpression extends ANode { } } else { if (constant == null) { - final ECast ecast = new ECast(line, offset, location, this, cast); + // For the case where a cast is required and a constant is not set. + // Modify the tree to add an ECast between this node and its parent. + // The output data from this node is copied to the ECast for + // further reads done by the parent. + + ECast ecast = new ECast(line, offset, location, this, cast); ecast.statement = statement; ecast.actual = expected; ecast.isNull = isNull; @@ -143,9 +158,17 @@ public abstract class AExpression extends ANode { return ecast; } else { if (expected.sort.constant) { + // For the case where a cast is required, a constant is set, + // and the constant can be immediately cast to the expected type. + // An EConstant replaces this node with the constant cast appropriately + // from the constant value defined by this node. Note that + // for constants output data does not need to be copied + // from this node because the output data for the EConstant + // will already be the same. 
+ constant = AnalyzerCaster.constCast(location, constant, cast); - final EConstant econstant = new EConstant(line, offset, location, constant); + EConstant econstant = new EConstant(line, offset, location, constant); econstant.analyze(variables); if (!expected.equals(econstant.actual)) { @@ -154,19 +177,36 @@ public abstract class AExpression extends ANode { return econstant; } else if (this instanceof EConstant) { - final ECast ecast = new ECast(line, offset, location, this, cast); + // For the case where a cast is required, a constant is set, + // the constant cannot be immediately cast to the expected type, + // and this node is already an EConstant. Modify the tree to add + // an ECast between this node and its parent. Note that + // for constants output data does not need to be copied + // from this node because the output data for the EConstant + // will already be the same. + + ECast ecast = new ECast(line, offset, location, this, cast); ecast.actual = expected; return ecast; } else { - final EConstant econstant = new EConstant(line, offset, location, constant); + // For the case where a cast is required, a constant is set, + // the constant cannot be immediately cast to the expected type, + // and this node is not an EConstant. Replace this node with + // an Econstant node copying the constant from this node. + // Modify the tree to add an ECast between the EConstant node + // and its parent. Note that for constants output data does not + // need to be copied from this node because the output data for + // the EConstant will already be the same. 
+ + EConstant econstant = new EConstant(line, offset, location, constant); econstant.analyze(variables); if (!actual.equals(econstant.actual)) { throw new IllegalStateException(error("Illegal tree structure.")); } - final ECast ecast = new ECast(line, offset, location, econstant, cast); + ECast ecast = new ECast(line, offset, location, econstant, cast); ecast.actual = expected; return ecast; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java index 80d24b4cab3..276a2b43865 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java @@ -113,8 +113,11 @@ public abstract class AStatement extends ANode { /** * Checks for errors and collects data for the writing phase. + * @return The new child node for the parent node calling this method. + * Possibly returns a different {@link AStatement} node if a type is + * def or if a different specialization is used. Otherwise, returns itself. */ - abstract void analyze(Variables variables); + abstract AStatement analyze(Variables variables); /** * Writes ASM based on the data collected during the analysis phase. 
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java index 23179a9ed0d..92b565e34df 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java @@ -471,6 +471,7 @@ public final class EBinary extends AExpression { @Override void write(MethodWriter writer) { writer.writeDebugInfo(offset); + if (actual.sort == Sort.STRING && operation == Operation.ADD) { if (!cat) { writer.writeNewStrings(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java index affd1fe78c0..af111b99995 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java @@ -246,75 +246,115 @@ public final class EChain extends AExpression { actual = last.after; } + /** + * Handles writing byte code for variable/method chains for all given possibilities + * including String concatenation, compound assignment, regular assignment, and simple + * reads. Includes proper duplication for chained assignments and assignments that are + * also read from. + * + * Example given 'x[0] += 5;' where x is an array of shorts and x[0] is 1. + * Note this example has two links -- x (LVariable) and [0] (LBrace). + * The following steps occur: + * 1. call link{x}.write(...) -- no op [...] + * 2. call link{x}.load(...) -- loads the address of the x array onto the stack [..., address(x)] + * 3. call writer.dup(...) -- dup's the address of the x array onto the stack for later use with store [..., address(x), address(x)] + * 4. call link{[0]}.write(...) 
-- load the array index value of the constant int 0 onto the stack [..., address(x), address(x), int(0)] + * 5. call link{[0]}.load(...) -- load the short value from x[0] onto the stack [..., address(x), short(1)] + * 6. call writer.writeCast(there) -- casts the short on the stack to an int so it can be added with the rhs [..., address(x), int(1)] + * 7. call expression.write(...) -- puts the expression's value of the constant int 5 onto the stack [..., address(x), int(1), int(5)] + * 8. call writer.writeBinaryInstruction(operation) -- writes the int addition instruction [..., address(x), int(6)] + * 9. call writer.writeCast(back) -- convert the value on the stack back into a short [..., address(x), short(6)] + * 10. call link{[0]}.store(...) -- store the value on the stack into the 0th index of the array x [...] + */ @Override void write(MethodWriter writer) { + // For the case where the chain represents a String concatenation + // we must first write debug information, and then depending on the + // Java version write a StringBuilder or track types going onto the + // stack. This must be done before the links in the chain are read + // because we need the StringBuilder to be placed on the stack + // ahead of any potential concatenation arguments. if (cat) { writer.writeDebugInfo(offset); - } - - if (cat) { writer.writeNewStrings(); } ALink last = links.get(links.size() - 1); + // Go through all the links in the chain first calling write + // and then load, except for the final link which may be a store. + // See individual links for more information on what each of the + // write, load, and store methods do. 
for (ALink link : links) { - link.write(writer); + link.write(writer); // call the write method on the link to prepare for a load/store operation if (link == last && link.store) { if (cat) { - writer.writeDup(link.size, 1); - link.load(writer); - writer.writeAppendStrings(link.after); + // Handle the case where we are doing a compound assignment + // representing a String concatenation. - expression.write(writer); + writer.writeDup(link.size, 1); // dup the StringBuilder + link.load(writer); // read the current link's value + writer.writeAppendStrings(link.after); // append the link's value using the StringBuilder + + expression.write(writer); // write the bytecode for the rhs expression if (!(expression instanceof EBinary) || ((EBinary)expression).operation != Operation.ADD || expression.actual.sort != Sort.STRING) { - writer.writeAppendStrings(expression.actual); + writer.writeAppendStrings(expression.actual); // append the expression's value unless it's also a concatenation } - writer.writeToStrings(); - writer.writeCast(back); + writer.writeToStrings(); // put the value of the StringBuilder on the stack + writer.writeCast(back); // if necessary, cast the String to the lhs actual type if (link.load) { - writer.writeDup(link.after.sort.size, link.size); + writer.writeDup(link.after.sort.size, link.size); // if this link is also read from dup the value onto the stack } - link.store(writer); + link.store(writer); // store the link's value from the stack in its respective variable/field/array } else if (operation != null) { - writer.writeDup(link.size, 0); - link.load(writer); + // Handle the case where we are doing a compound assignment that + // does not represent a String concatenation. 
+ + writer.writeDup(link.size, 0); // if necessary, dup the previous link's value to be both loaded from and stored to + link.load(writer); // load the current link's value if (link.load && post) { - writer.writeDup(link.after.sort.size, link.size); + writer.writeDup(link.after.sort.size, link.size); // dup the value if the link is also + // read from and is a post increment } - writer.writeCast(there); - expression.write(writer); - writer.writeBinaryInstruction(location, promote, operation); + writer.writeCast(there); // if necessary cast the current link's value + // to the promotion type between the lhs and rhs types + expression.write(writer); // write the bytecode for the rhs expression + writer.writeBinaryInstruction(location, promote, operation); // write the operation instruction for compound assignment - writer.writeCast(back); + writer.writeCast(back); // if necessary cast the promotion type value back to the link's type if (link.load && !post) { - writer.writeDup(link.after.sort.size, link.size); + writer.writeDup(link.after.sort.size, link.size); // dup the value if the link is also + // read from and is not a post increment } - link.store(writer); + link.store(writer); // store the link's value from the stack in its respective variable/field/array } else { - expression.write(writer); + // Handle the case for a simple write. + + expression.write(writer); // write the bytecode for the rhs expression if (link.load) { - writer.writeDup(link.after.sort.size, link.size); + writer.writeDup(link.after.sort.size, link.size); // dup the value if the link is also read from } - link.store(writer); + link.store(writer); // store the link's value from the stack in its respective variable/field/array } } else { - link.load(writer); + // Handle the case for a simple read. 
+ + link.load(writer); // read the link's value onto the stack } } - writer.writeBranch(tru, fals); + writer.writeBranch(tru, fals); // if this is a branch node, write the bytecode to make an appropriate jump } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java index 5bc6e4ee400..8e681d6b3c3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java @@ -400,6 +400,7 @@ public final class EComp extends AExpression { @Override void write(MethodWriter writer) { writer.writeDebugInfo(offset); + boolean branch = tru != null || fals != null; org.objectweb.asm.Type rtype = right.actual.type; Sort rsort = right.actual.sort; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java index d7f7ab7e4b6..aadcedd42ce 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EConditional.java @@ -79,6 +79,7 @@ public final class EConditional extends AExpression { @Override void write(MethodWriter writer) { writer.writeDebugInfo(offset); + Label localfals = new Label(); Label end = new Label(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java index 2122174442d..63e80d77cb4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java @@ -166,6 +166,7 @@ public final class EUnary extends AExpression { @Override void write(MethodWriter writer) { 
writer.writeDebugInfo(offset); + if (operation == Operation.NOT) { if (tru == null && fals == null) { Label localfals = new Label(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LBrace.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LBrace.java index 02e8ceacd9f..cef45eed0a7 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LBrace.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LBrace.java @@ -83,5 +83,4 @@ public final class LBrace extends ALink { writer.writeDebugInfo(offset); writer.arrayStore(after.type); } - } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCall.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCall.java index 30775e3a6e7..a4e05c38316 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCall.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LCall.java @@ -92,6 +92,7 @@ public final class LCall extends ALink { @Override void load(MethodWriter writer) { writer.writeDebugInfo(offset); + for (AExpression argument : arguments) { argument.write(writer); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefArray.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefArray.java index b00eafe3f9f..27bce1d6533 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefArray.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefArray.java @@ -59,6 +59,7 @@ final class LDefArray extends ALink implements IDefLink { @Override void load(MethodWriter writer) { writer.writeDebugInfo(offset); + String desc = Type.getMethodDescriptor(after.type, Definition.DEF_TYPE.type, index.actual.type); writer.invokeDynamic("arrayLoad", desc, DEF_BOOTSTRAP_HANDLE, (Object)DefBootstrap.ARRAY_LOAD); } @@ -66,6 +67,7 @@ final 
class LDefArray extends ALink implements IDefLink { @Override void store(MethodWriter writer) { writer.writeDebugInfo(offset); + String desc = Type.getMethodDescriptor(Definition.VOID_TYPE.type, Definition.DEF_TYPE.type, index.actual.type, after.type); writer.invokeDynamic("arrayStore", desc, DEF_BOOTSTRAP_HANDLE, (Object)DefBootstrap.ARRAY_STORE); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefCall.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefCall.java index 0292ac3c589..bf7ea1778df 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefCall.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefCall.java @@ -68,6 +68,7 @@ final class LDefCall extends ALink implements IDefLink { @Override void load(MethodWriter writer) { writer.writeDebugInfo(offset); + StringBuilder signature = new StringBuilder(); signature.append('('); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefField.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefField.java index e762de9e862..b9937d735ec 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefField.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LDefField.java @@ -56,6 +56,7 @@ final class LDefField extends ALink implements IDefLink { @Override void load(MethodWriter writer) { writer.writeDebugInfo(offset); + String desc = Type.getMethodDescriptor(after.type, Definition.DEF_TYPE.type); writer.invokeDynamic(value, desc, DEF_BOOTSTRAP_HANDLE, (Object)DefBootstrap.LOAD); } @@ -63,6 +64,7 @@ final class LDefField extends ALink implements IDefLink { @Override void store(MethodWriter writer) { writer.writeDebugInfo(offset); + String desc = Type.getMethodDescriptor(Definition.VOID_TYPE.type, Definition.DEF_TYPE.type, after.type); writer.invokeDynamic(value, desc, DEF_BOOTSTRAP_HANDLE, 
(Object)DefBootstrap.STORE); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LField.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LField.java index 88173ad3f65..50f67d0d7f6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LField.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LField.java @@ -106,6 +106,7 @@ public final class LField extends ALink { @Override void load(MethodWriter writer) { writer.writeDebugInfo(offset); + if (java.lang.reflect.Modifier.isStatic(field.modifiers)) { writer.getStatic(field.owner.type, field.javaName, field.type.type); } else { @@ -116,6 +117,7 @@ public final class LField extends ALink { @Override void store(MethodWriter writer) { writer.writeDebugInfo(offset); + if (java.lang.reflect.Modifier.isStatic(field.modifiers)) { writer.putStatic(field.owner.type, field.javaName, field.type.type); } else { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LListShortcut.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LListShortcut.java index b628fee5f37..357c9478254 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LListShortcut.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LListShortcut.java @@ -80,6 +80,7 @@ final class LListShortcut extends ALink { @Override void load(MethodWriter writer) { writer.writeDebugInfo(offset); + if (java.lang.reflect.Modifier.isInterface(getter.owner.clazz.getModifiers())) { writer.invokeInterface(getter.owner.type, getter.method); } else { @@ -94,6 +95,7 @@ final class LListShortcut extends ALink { @Override void store(MethodWriter writer) { writer.writeDebugInfo(offset); + if (java.lang.reflect.Modifier.isInterface(setter.owner.clazz.getModifiers())) { writer.invokeInterface(setter.owner.type, setter.method); } else { diff --git 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LMapShortcut.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LMapShortcut.java index a500a6673fc..5580ef3d01f 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LMapShortcut.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LMapShortcut.java @@ -79,6 +79,7 @@ final class LMapShortcut extends ALink { @Override void load(MethodWriter writer) { writer.writeDebugInfo(offset); + if (java.lang.reflect.Modifier.isInterface(getter.owner.clazz.getModifiers())) { writer.invokeInterface(getter.owner.type, getter.method); } else { @@ -93,6 +94,7 @@ final class LMapShortcut extends ALink { @Override void store(MethodWriter writer) { writer.writeDebugInfo(offset); + if (java.lang.reflect.Modifier.isInterface(setter.owner.clazz.getModifiers())) { writer.invokeInterface(setter.owner.type, setter.method); } else { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LNewArray.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LNewArray.java index c555f04ab95..21340caac61 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LNewArray.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LNewArray.java @@ -80,6 +80,7 @@ public final class LNewArray extends ALink { @Override void load(MethodWriter writer) { writer.writeDebugInfo(offset); + for (AExpression argument : arguments) { argument.write(writer); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LShortcut.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LShortcut.java index 7eb44156e55..42128ad86a6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LShortcut.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LShortcut.java @@ -85,6 +85,7 @@ final class 
LShortcut extends ALink { @Override void load(MethodWriter writer) { writer.writeDebugInfo(offset); + if (java.lang.reflect.Modifier.isInterface(getter.owner.clazz.getModifiers())) { writer.invokeInterface(getter.owner.type, getter.method); } else { @@ -99,6 +100,7 @@ final class LShortcut extends ALink { @Override void store(MethodWriter writer) { writer.writeDebugInfo(offset); + if (java.lang.reflect.Modifier.isInterface(setter.owner.clazz.getModifiers())) { writer.invokeInterface(setter.owner.type, setter.method); } else { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBlock.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBlock.java index 0f05243f994..1790ebbbfc1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBlock.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBlock.java @@ -22,7 +22,6 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Variables; import org.elasticsearch.painless.MethodWriter; -import java.util.Collections; import java.util.List; /** @@ -35,27 +34,27 @@ public final class SBlock extends AStatement { public SBlock(int line, int offset, String location, List statements) { super(line, offset, location); - this.statements = Collections.unmodifiableList(statements); + this.statements = statements; } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { if (statements == null || statements.isEmpty()) { throw new IllegalArgumentException(error("A block must contain at least one statement.")); } - final AStatement last = statements.get(statements.size() - 1); - - for (AStatement statement : statements) { + for (int index = 0; index < statements.size(); ++index) { if (allEscape) { throw new IllegalArgumentException(error("Unreachable statement.")); } - statement.inLoop = inLoop; - statement.lastSource = lastSource && statement == last; - statement.lastLoop = 
(beginLoop || lastLoop) && statement == last; + AStatement statement = statements.get(index); - statement.analyze(variables); + statement.inLoop = inLoop; + statement.lastSource = lastSource && index == statements.size() - 1; + statement.lastLoop = (beginLoop || lastLoop) && index == statements.size() - 1; + + statements.set(index, statement.analyze(variables)); methodEscape = statement.methodEscape; loopEscape = statement.loopEscape; @@ -64,6 +63,8 @@ public final class SBlock extends AStatement { anyBreak |= statement.anyBreak; statementCount += statement.statementCount; } + + return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBreak.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBreak.java index 4cd1decb26b..14a27bcbaac 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBreak.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBreak.java @@ -32,7 +32,7 @@ public final class SBreak extends AStatement { } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { if (!inLoop) { throw new IllegalArgumentException(error("Break statement outside of a loop.")); } @@ -41,6 +41,8 @@ public final class SBreak extends AStatement { allEscape = true; anyBreak = true; statementCount = 1; + + return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java index 3fb8c8b4680..a35ca88d7f6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java @@ -34,7 +34,7 @@ public final class SCatch extends AStatement { final String type; final String name; - final SBlock block; + AStatement block; Variable variable; @@ -51,7 +51,7 @@ public final class SCatch 
extends AStatement { } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { final Type type; try { @@ -71,7 +71,7 @@ public final class SCatch extends AStatement { block.inLoop = inLoop; block.lastLoop = lastLoop; - block.analyze(variables); + block = block.analyze(variables); methodEscape = block.methodEscape; loopEscape = block.loopEscape; @@ -80,11 +80,14 @@ public final class SCatch extends AStatement { anyBreak = block.anyBreak; statementCount = block.statementCount; } + + return this; } @Override void write(MethodWriter writer) { writer.writeStatementOffset(offset); + Label jump = new Label(); writer.mark(jump); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SContinue.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SContinue.java index 18ce5c81231..c5b486db942 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SContinue.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SContinue.java @@ -32,7 +32,7 @@ public final class SContinue extends AStatement { } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { if (!inLoop) { throw new IllegalArgumentException(error("Continue statement outside of a loop.")); } @@ -44,6 +44,8 @@ public final class SContinue extends AStatement { allEscape = true; anyContinue = true; statementCount = 1; + + return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclBlock.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclBlock.java index ddfe54c22eb..214d7beb576 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclBlock.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclBlock.java @@ -22,7 +22,6 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.Variables; import 
org.elasticsearch.painless.MethodWriter; -import java.util.Collections; import java.util.List; /** @@ -35,21 +34,25 @@ public final class SDeclBlock extends AStatement { public SDeclBlock(int line, int offset, String location, List declarations) { super(line, offset, location); - this.declarations = Collections.unmodifiableList(declarations); + this.declarations = declarations; } @Override - void analyze(Variables variables) { - for (SDeclaration declaration : declarations) { - declaration.analyze(variables); + AStatement analyze(Variables variables) { + for (int index = 0; index < declarations.size(); ++index) { + AStatement declaration = declarations.get(index); + + declarations.set(index, (SDeclaration)declaration.analyze(variables)); } statementCount = declarations.size(); + + return this; } @Override void write(MethodWriter writer) { - for (SDeclaration declaration : declarations) { + for (AStatement declaration : declarations) { declaration.write(writer); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java index 526d90dca26..6dc0a1ad78a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java @@ -46,7 +46,7 @@ public final class SDeclaration extends AStatement { } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { final Type type; try { @@ -62,11 +62,14 @@ public final class SDeclaration extends AStatement { } variable = variables.addVariable(location, type, name, false, false); + + return this; } @Override void write(MethodWriter writer) { writer.writeStatementOffset(offset); + if (expression == null) { switch (variable.type.sort) { case VOID: throw new IllegalStateException(error("Illegal tree structure.")); diff --git 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDo.java index 3b854ea4ebf..efd287669c7 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDo.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDo.java @@ -30,7 +30,7 @@ import org.elasticsearch.painless.MethodWriter; public final class SDo extends AStatement { final int maxLoopCounter; - final SBlock block; + AStatement block; AExpression condition; public SDo(int line, int offset, String location, int maxLoopCounter, SBlock block, AExpression condition) { @@ -42,7 +42,7 @@ public final class SDo extends AStatement { } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { variables.incrementScope(); if (block == null) { @@ -52,7 +52,7 @@ public final class SDo extends AStatement { block.beginLoop = true; block.inLoop = true; - block.analyze(variables); + block = block.analyze(variables); if (block.loopEscape && !block.anyContinue) { throw new IllegalArgumentException(error("Extraneous do while loop.")); @@ -82,11 +82,14 @@ public final class SDo extends AStatement { } variables.decrementScope(); + + return this; } @Override void write(MethodWriter writer) { writer.writeStatementOffset(offset); + Label start = new Label(); Label begin = new Label(); Label end = new Label(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SExpression.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SExpression.java index 37f1cbeb24c..d15a57422b6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SExpression.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SExpression.java @@ -38,7 +38,7 @@ public final class SExpression extends AStatement { } @Override - void analyze(Variables variables) { + AStatement analyze(Variables 
variables) { expression.read = lastSource; expression.analyze(variables); @@ -56,6 +56,8 @@ public final class SExpression extends AStatement { loopEscape = rtn; allEscape = rtn; statementCount = 1; + + return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java index ac8e721a1f6..1bb18bea6c8 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java @@ -33,7 +33,7 @@ public final class SFor extends AStatement { ANode initializer; AExpression condition; AExpression afterthought; - final SBlock block; + AStatement block; public SFor(int line, int offset, String location, int maxLoopCounter, ANode initializer, AExpression condition, AExpression afterthought, SBlock block) { @@ -47,14 +47,14 @@ public final class SFor extends AStatement { } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { variables.incrementScope(); boolean continuous = false; if (initializer != null) { - if (initializer instanceof SDeclBlock) { - ((SDeclBlock)initializer).analyze(variables); + if (initializer instanceof AStatement) { + initializer = ((AStatement)initializer).analyze(variables); } else if (initializer instanceof AExpression) { AExpression initializer = (AExpression)this.initializer; @@ -102,7 +102,7 @@ public final class SFor extends AStatement { block.beginLoop = true; block.inLoop = true; - block.analyze(variables); + block = block.analyze(variables); if (block.loopEscape && !block.anyContinue) { throw new IllegalArgumentException(error("Extraneous for loop.")); @@ -123,11 +123,14 @@ public final class SFor extends AStatement { } variables.decrementScope(); + + return this; } @Override void write(MethodWriter writer) { writer.writeStatementOffset(offset); + Label start = new Label(); Label 
begin = afterthought == null ? start : new Label(); Label end = new Label(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java index 1f498215478..46751e8b449 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java @@ -30,7 +30,7 @@ import org.elasticsearch.painless.MethodWriter; public final class SIf extends AStatement { AExpression condition; - final SBlock ifblock; + AStatement ifblock; public SIf(int line, int offset, String location, AExpression condition, SBlock ifblock) { super(line, offset, location); @@ -40,7 +40,7 @@ public final class SIf extends AStatement { } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { condition.expected = Definition.BOOLEAN_TYPE; condition.analyze(variables); condition = condition.cast(variables); @@ -58,17 +58,20 @@ public final class SIf extends AStatement { ifblock.lastLoop = lastLoop; variables.incrementScope(); - ifblock.analyze(variables); + ifblock = ifblock.analyze(variables); variables.decrementScope(); anyContinue = ifblock.anyContinue; anyBreak = ifblock.anyBreak; statementCount = ifblock.statementCount; + + return this; } @Override void write(MethodWriter writer) { writer.writeStatementOffset(offset); + Label fals = new Label(); condition.fals = fals; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java index d8fd5c0756e..f484819c4a2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java @@ -30,8 +30,8 @@ import org.elasticsearch.painless.MethodWriter; public final class SIfElse extends AStatement { 
AExpression condition; - final SBlock ifblock; - final SBlock elseblock; + AStatement ifblock; + AStatement elseblock; public SIfElse(int line, int offset, String location, AExpression condition, SBlock ifblock, SBlock elseblock) { super(line, offset, location); @@ -42,7 +42,7 @@ public final class SIfElse extends AStatement { } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { condition.expected = Definition.BOOLEAN_TYPE; condition.analyze(variables); condition = condition.cast(variables); @@ -60,7 +60,7 @@ public final class SIfElse extends AStatement { ifblock.lastLoop = lastLoop; variables.incrementScope(); - ifblock.analyze(variables); + ifblock = ifblock.analyze(variables); variables.decrementScope(); anyContinue = ifblock.anyContinue; @@ -76,7 +76,7 @@ public final class SIfElse extends AStatement { elseblock.lastLoop = lastLoop; variables.incrementScope(); - elseblock.analyze(variables); + elseblock = elseblock.analyze(variables); variables.decrementScope(); methodEscape = ifblock.methodEscape && elseblock.methodEscape; @@ -85,11 +85,14 @@ public final class SIfElse extends AStatement { anyContinue |= elseblock.anyContinue; anyBreak |= elseblock.anyBreak; statementCount = Math.max(ifblock.statementCount, elseblock.statementCount); + + return this; } @Override void write(MethodWriter writer) { writer.writeStatementOffset(offset); + Label end = new Label(); Label fals = elseblock != null ? 
new Label() : end; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java index d6cc5598263..4af533cdf84 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java @@ -37,7 +37,7 @@ public final class SReturn extends AStatement { } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { expression.expected = Definition.OBJECT_TYPE; expression.internal = true; expression.analyze(variables); @@ -48,6 +48,8 @@ public final class SReturn extends AStatement { allEscape = true; statementCount = 1; + + return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java index 899d54225dc..c1ac248b976 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java @@ -23,7 +23,6 @@ import org.elasticsearch.painless.Variables; import org.objectweb.asm.Opcodes; import org.elasticsearch.painless.MethodWriter; -import java.util.Collections; import java.util.List; /** @@ -36,32 +35,34 @@ public final class SSource extends AStatement { public SSource(int line, int offset, String location, List statements) { super(line, offset, location); - this.statements = Collections.unmodifiableList(statements); + this.statements = statements; } @Override - public void analyze(Variables variables) { + public AStatement analyze(Variables variables) { if (statements == null || statements.isEmpty()) { throw new IllegalArgumentException(error("Cannot generate an empty script.")); } variables.incrementScope(); - final AStatement last = statements.get(statements.size() - 1); + 
for (int index = 0; index < statements.size(); ++index) { + AStatement statement = statements.get(index); - for (AStatement statement : statements) { if (allEscape) { throw new IllegalArgumentException(error("Unreachable statement.")); } - statement.lastSource = statement == last; - statement.analyze(variables); + statement.lastSource = index == statements.size() - 1; + statements.set(index, statement.analyze(variables)); methodEscape = statement.methodEscape; allEscape = statement.allEscape; } variables.decrementScope(); + + return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SThrow.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SThrow.java index 7c65aafbc56..fb0a260e3c9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SThrow.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SThrow.java @@ -37,7 +37,7 @@ public final class SThrow extends AStatement { } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { expression.expected = Definition.EXCEPTION_TYPE; expression.analyze(variables); expression = expression.cast(variables); @@ -46,6 +46,8 @@ public final class SThrow extends AStatement { loopEscape = true; allEscape = true; statementCount = 1; + + return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/STry.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/STry.java index 0fdb70bdc82..41c5612625e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/STry.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/STry.java @@ -23,7 +23,6 @@ import org.elasticsearch.painless.Variables; import org.objectweb.asm.Label; import org.elasticsearch.painless.MethodWriter; -import java.util.Collections; import java.util.List; /** @@ -31,18 +30,18 @@ import java.util.List; */ 
public final class STry extends AStatement { - final SBlock block; + AStatement block; final List catches; - public STry(int line, int offset, String location, SBlock block, List traps) { + public STry(int line, int offset, String location, SBlock block, List catches) { super(line, offset, location); this.block = block; - this.catches = Collections.unmodifiableList(traps); + this.catches = catches; } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { if (block == null) { throw new IllegalArgumentException(error("Extraneous try statement.")); } @@ -52,7 +51,7 @@ public final class STry extends AStatement { block.lastLoop = lastLoop; variables.incrementScope(); - block.analyze(variables); + block = block.analyze(variables); variables.decrementScope(); methodEscape = block.methodEscape; @@ -63,13 +62,15 @@ public final class STry extends AStatement { int statementCount = 0; - for (SCatch catc : catches) { + for (int index = 0; index < catches.size(); ++index) { + SCatch catc = catches.get(index); + catc.lastSource = lastSource; catc.inLoop = inLoop; catc.lastLoop = lastLoop; variables.incrementScope(); - catc.analyze(variables); + catches.set(index, (SCatch)catc.analyze(variables)); variables.decrementScope(); methodEscape &= catc.methodEscape; @@ -82,11 +83,14 @@ public final class STry extends AStatement { } this.statementCount = block.statementCount + statementCount; + + return this; } @Override void write(MethodWriter writer) { writer.writeStatementOffset(offset); + Label begin = new Label(); Label end = new Label(); Label exception = new Label(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SWhile.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SWhile.java index 43ac824dac7..3103b1b46d1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SWhile.java +++ 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SWhile.java @@ -31,7 +31,7 @@ public final class SWhile extends AStatement { final int maxLoopCounter; AExpression condition; - final SBlock block; + AStatement block; public SWhile(int line, int offset, String location, int maxLoopCounter, AExpression condition, SBlock block) { super(line, offset, location); @@ -42,7 +42,7 @@ public final class SWhile extends AStatement { } @Override - void analyze(Variables variables) { + AStatement analyze(Variables variables) { variables.incrementScope(); condition.expected = Definition.BOOLEAN_TYPE; @@ -67,7 +67,7 @@ public final class SWhile extends AStatement { block.beginLoop = true; block.inLoop = true; - block.analyze(variables); + block = block.analyze(variables); if (block.loopEscape && !block.anyContinue) { throw new IllegalArgumentException(error("Extraneous while loop.")); @@ -88,11 +88,14 @@ public final class SWhile extends AStatement { } variables.decrementScope(); + + return this; } @Override void write(MethodWriter writer) { writer.writeStatementOffset(offset); + Label begin = new Label(); Label end = new Label(); From 9a0d0d7cffa876639a425f785893fe83388836c1 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Thu, 2 Jun 2016 15:30:11 -0700 Subject: [PATCH 05/39] Fixed a grammar mistake in a comment. 
--- .../src/main/java/org/elasticsearch/painless/node/EChain.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java index af111b99995..5448c53b35c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java @@ -301,7 +301,7 @@ public final class EChain extends AExpression { if (!(expression instanceof EBinary) || ((EBinary)expression).operation != Operation.ADD || expression.actual.sort != Sort.STRING) { - writer.writeAppendStrings(expression.actual); // append the expression's value unless its also a concatenation + writer.writeAppendStrings(expression.actual); // append the expression's value unless it's also a concatenation } writer.writeToStrings(); // put the value of the StringBuilder on the stack From e6aaaf11ed4a1b8f595f920de1840938eddb7a97 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Fri, 3 Jun 2016 09:50:51 +0200 Subject: [PATCH 06/39] Reworked docs for index-shrink API (#18705) --- docs/reference/indices/shrink-index.asciidoc | 163 +++++++++++++------ 1 file changed, 112 insertions(+), 51 deletions(-) diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index db6f717a82b..d531477e77c 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -1,64 +1,125 @@ [[indices-shrink-index]] == Shrink Index -The shrink index API allows to shrink an existing index into a new index with a single shard. -In order to shrink an index, all its shards must be allocated on a single node in the cluster. 
-This is required since the shrink command will copy all shards index files into the target index -data folder when the primary of the target index is initially allocated. +The shrink index API allows you to shrink an existing index into a new index +with a single primary shard. Before shrinking, a (primary or replica) copy of +every shard in the index must be present on the same node. -When an index is shrunk no write operations should happen to the source index. Elasticsearch will -enforce the `read-only` property when the shrink command is executed. All operations necessary to shrink the -source index are executed during initial primary recovery. Once the target index primary shard is started the -shrink operation has successfully finished. To monitor status and progress use <> +Shrinking works as follows: +* First, it creates a new target index with the same definition as the source + index, but with a single primary shard. -To shrink and index all shards of that index must be allocated on a single node. +* Then it hard-links segments from the source index into the target index. (If + the file system doesn't support hard-linking, then all segments are copied + into the new index, which is a much more time consuming process.) -[source,js] --------------------------------------------------- -$ curl -XPUT 'http://localhost:9200/logs/_settings' -d '{ - "settings" : { - "index.routing.allocation.require._name" : "shrink_node_name", <1> - "index.blocks.write" : true <2> - } -}' --------------------------------------------------- -<1> Forces the relocation of all of the indices shards to the node `shrink_node_name` -<2> Prevents write operations to this index while still allowing metadata changes like deleting the index. - -The above second curl example shows how an index called `logs` can be -forced to allocate at least one copy of each shard on a specific node in the cluster. 
- -The `_shrink` API is similar to <> and accepts `settings` and `aliases` for the target index. - -[source,js] --------------------------------------------------- -$ curl -XPUT 'http://localhost:9200/logs/_shrink/logs_single_shard' -d '{ - "settings" : { - "index.codec" : "best_compression", <1> - } -}' --------------------------------------------------- -<1> Enables `best_compression` codec on the target index - -The API call above returns immediately once the target index is created but doesn't wait -for the shrink operation to start. Once the target indices primary shard moves to state `initializing` -the shrink operation has started. +* Finally, it recovers the target index as though it were a closed index which + had just been re-opened. [float] -[[shrink-index-limitations]] -=== Limitations +=== Preparing an index for shrinking -Indices can only be shrunk into a single shard if they fully the following requirements: +In order to shrink an index, the index must be marked as read-only, and a +(primary or replica) copy of every shard in the index must be relocated to the +same node and have <> `green`. + +These two conditions can be achieved with the following request: + +[source,js] +-------------------------------------------------- +PUT /my_source_index/_settings +{ + "settings": { + "index.routing.allocation.require._name": "shrink_node_name", <1> + "index.blocks.write": true <2> + } +} +-------------------------------------------------- +<1> Forces the relocation of a copy of each shard to the node with name + `shrink_node_name`. See <> for more options. + +<2> Prevents write operations to this index while still allowing metadata + changes like deleting the index. + +It can take a while to relocate the source index. Progress can be tracked +with the <>, or the <> can be used to wait until all shards have relocated +with the `wait_for_relocating_shards` parameter. 
+ +[float] +=== Shrinking an index + +To shrink `my_source_index` into a new index called `my_target_index`, issue +the following request: + +[source,js] +-------------------------------------------------- +POST my_source_index/_shrink/my_target_index +-------------------------------------------------- + +The above request returns immediately once the target index has been added to +the cluster state -- it doesn't wait for the shrink operation to start. + +[IMPORTANT] +===================================== + +Indices can only be shrunk into a single shard if they satisfy the following requirements: - * an instance of all of the indices shards must be allocated on a single node - * the index must not contain more than `2.14 billion` documents (`2147483519`) in total (sum of all shards) - This is the maximum shard size elasticsearch can support. - * the index must have more than one shard - * the index must be `read-only`, ie. have a cluster block set `index.blocks.write=true` * the target index must not exist - * all `index.analysis.*` and `index.similarity.*` settings passed to the `_shrink` call will be overwritten with the - source indices settings. - * if the target index can't be allocated on the shrink node, due to throttling or other allocation deciders, - its primary shard will stay `unassigned` until it can be allocated on that node + +* The index must have more than one primary shard. + +* The index must not contain more than `2,147,483,519` documents in total + across all shards as this is the maximum number of docs that can fit into a + single shard. + +* The node handling the shrink process must have sufficient free disk space to + accommodate a second copy of the existing index. 
+ +===================================== + +The `_shrink` API is similar to the <> +and accepts `settings` and `aliases` parameters for the target index: + +[source,js] +-------------------------------------------------- +POST my_source_index/_shrink/my_target_index +{ + "settings": { + "index.number_of_replicas": 1, + "index.codec": "best_compression" <1> + }, + "aliases": { + "my_search_indices": {} + } +} +-------------------------------------------------- + +<1> Best compression will only take affect when new writes are made to the + index, such as when <> the shard to a single + segment. + +NOTE: Mappings may not be specified in the `_shrink` request, and all +`index.analysis.*` and `index.similarity.*` settings will be overwritten with +the settings from the source index. + +[float] +=== Monitoring the shrink process + +The shrink process can be monitored with the <>, or the <> can be used to wait +until all primary shards have been allocated by setting the `wait_for_status` +parameter to `yellow`. + +The `_shrink` API returns as soon as the target index has been added to the +cluster state, before any shards have been allocated. At this point, all +shards are in the state `unassigned`. If, for any reason, the target index +can't be allocated on the shrink node, its primary shard will remain +`unassigned` until it can be allocated on that node. + +Once the primary shard is allocated, it moves to state `initializing`, and the +shrink process begins. When the shrink operation completes, the shard will +become `active`. At that point, Elasticsearch will try to allocate any +replicas and may decide to relocate the primary shard to another node. 
From 78574d248cf10dad1c5a566fda79e1e0c94afe43 Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Fri, 3 Jun 2016 11:37:22 +0200 Subject: [PATCH 07/39] [TEST] mute RandomAllocationDeciderTests.testRandomDecisions we have a pr already: https://github.com/elastic/elasticsearch/pull/18701 --- .../cluster/routing/allocation/RandomAllocationDeciderTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index 307df91c302..1ec1ed8c37d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -54,6 +54,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase { * amount of iterations the test allows allocation unless the same shard is * already allocated on a node and balances the cluster to gain optimal * balance.*/ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/18701") public void testRandomDecisions() { RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(random()); AllocationService strategy = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, From 6c28235b03fdad898986f613e198c24ca3067e4d Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 3 Jun 2016 13:42:09 +0200 Subject: [PATCH 08/39] Fix merge stats rendering in RestIndicesAction (#18720) give the table description: ``` table.addCell("merges.total", "sibling:pri;alias:mt,mergesTotal;default:false;text-align:right;desc:number of completed merge ops"); table.addCell("pri.merges.total", "default:false;text-align:right;desc:number of completed merge ops"); table.addCell("merges.total_docs", 
"sibling:pri;alias:mtd,mergesTotalDocs;default:false;text-align:right;desc:docs merged"); table.addCell("pri.merges.total_docs", "default:false;text-align:right;desc:docs merged"); table.addCell("merges.total_size", "sibling:pri;alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged"); table.addCell("pri.merges.total_size", "default:false;text-align:right;desc:size merged"); ``` this is how it should be. --- .../org/elasticsearch/rest/action/cat/RestIndicesAction.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java index eaf9ad4a081..38cbba6fd5a 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java @@ -412,13 +412,13 @@ public class RestIndicesAction extends AbstractCatAction { table.addCell(indexStats == null ? null : indexStats.getPrimaries().getIndexing().getTotal().getIndexFailedCount()); table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getCurrent()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getCurrentSize()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getCurrent()); table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getCurrentNumDocs()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getCurrentNumDocs()); table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getCurrentSize()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getCurrent()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getCurrentSize()); table.addCell(indexStats == null ? 
null : indexStats.getTotal().getMerge().getTotal()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getTotal()); From 24a7b7224bd834eb57d1026ceb0c455b8c48a206 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Fri, 3 Jun 2016 14:11:34 +0200 Subject: [PATCH 09/39] Fix recovery throttling to properly handle relocating non-primary shards (#18701) Relocation of non-primary shards is realized by recovering from the primary shard. Recovery throttling wrongly equates non-primary relocation as recovering a shard from the non-primary relocation source, however. Closes #18640 --- .../cluster/routing/RoutingNodes.java | 89 +++++------ .../decider/ThrottlingAllocationDecider.java | 110 ++++++++----- .../routing/RandomShardRoutingMutator.java | 2 +- .../RandomAllocationDeciderTests.java | 1 - .../allocation/ThrottlingAllocationTests.java | 150 ++++++++++-------- 5 files changed, 199 insertions(+), 153 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index fbbedcdefde..a4e61eac739 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -108,18 +108,18 @@ public class RoutingNodes implements Iterable { // add the counterpart shard with relocatingNodeId reflecting the source from which // it's relocating from. 
ShardRouting targetShardRouting = shard.buildTargetRelocatingShard(); - addInitialRecovery(targetShardRouting, routingTable); + addInitialRecovery(targetShardRouting, indexShard.primary); previousValue = entries.put(targetShardRouting.shardId(), targetShardRouting); if (previousValue != null) { throw new IllegalArgumentException("Cannot have two different shards with same shard id on same node"); } assignedShardsAdd(targetShardRouting); - } else if (shard.active() == false) { // shards that are initializing without being relocated + } else if (shard.initializing()) { if (shard.primary()) { inactivePrimaryCount++; } inactiveShardCount++; - addInitialRecovery(shard, routingTable); + addInitialRecovery(shard, indexShard.primary); } } else { unassignedShards.add(shard); @@ -134,48 +134,44 @@ public class RoutingNodes implements Iterable { } private void addRecovery(ShardRouting routing) { - addRecovery(routing, true, null); + updateRecoveryCounts(routing, true, findAssignedPrimaryIfPeerRecovery(routing)); } private void removeRecovery(ShardRouting routing) { - addRecovery(routing, false, null); + updateRecoveryCounts(routing, false, findAssignedPrimaryIfPeerRecovery(routing)); } - private void addInitialRecovery(ShardRouting routing, RoutingTable routingTable) { - addRecovery(routing, true, routingTable); + private void addInitialRecovery(ShardRouting routing, ShardRouting initialPrimaryShard) { + updateRecoveryCounts(routing, true, initialPrimaryShard); } - private void addRecovery(final ShardRouting routing, final boolean increment, final RoutingTable routingTable) { + private void updateRecoveryCounts(final ShardRouting routing, final boolean increment, @Nullable final ShardRouting primary) { final int howMany = increment ? 
1 : -1; assert routing.initializing() : "routing must be initializing: " + routing; + // TODO: check primary == null || primary.active() after all tests properly add ReplicaAfterPrimaryActiveAllocationDecider + assert primary == null || primary.assignedToNode() : + "shard is initializing but its primary is not assigned to a node"; + Recoveries.getOrAdd(recoveriesPerNode, routing.currentNodeId()).addIncoming(howMany); - final String sourceNodeId; - if (routing.relocatingNodeId() != null) { // this is a relocation-target - sourceNodeId = routing.relocatingNodeId(); - if (routing.primary() && increment == false) { // primary is done relocating + + if (routing.isPeerRecovery()) { + // add/remove corresponding outgoing recovery on node with primary shard + if (primary == null) { + throw new IllegalStateException("shard is peer recovering but primary is unassigned"); + } + Recoveries.getOrAdd(recoveriesPerNode, primary.currentNodeId()).addOutgoing(howMany); + + if (increment == false && routing.primary() && routing.relocatingNodeId() != null) { + // primary is done relocating, move non-primary recoveries from old primary to new primary int numRecoveringReplicas = 0; for (ShardRouting assigned : assignedShards(routing.shardId())) { - if (assigned.primary() == false && assigned.initializing() && assigned.relocatingNodeId() == null) { + if (assigned.primary() == false && assigned.isPeerRecovery()) { numRecoveringReplicas++; } } - // we transfer the recoveries to the relocated primary - recoveriesPerNode.get(sourceNodeId).addOutgoing(-numRecoveringReplicas); + recoveriesPerNode.get(routing.relocatingNodeId()).addOutgoing(-numRecoveringReplicas); recoveriesPerNode.get(routing.currentNodeId()).addOutgoing(numRecoveringReplicas); } - } else if (routing.primary() == false) { // primary without relocationID is initial recovery - ShardRouting primary = findPrimary(routing); - if (primary == null && routingTable != null) { - primary = 
routingTable.index(routing.index().getName()).shard(routing.shardId().id()).primary; - } else if (primary == null) { - throw new IllegalStateException("replica is initializing but primary is unassigned"); - } - sourceNodeId = primary.currentNodeId(); - } else { - sourceNodeId = null; - } - if (sourceNodeId != null) { - Recoveries.getOrAdd(recoveriesPerNode, sourceNodeId).addOutgoing(howMany); } } @@ -187,18 +183,21 @@ public class RoutingNodes implements Iterable { return recoveriesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getOutgoing(); } - private ShardRouting findPrimary(ShardRouting routing) { - List shardRoutings = assignedShards.get(routing.shardId()); + @Nullable + private ShardRouting findAssignedPrimaryIfPeerRecovery(ShardRouting routing) { ShardRouting primary = null; - if (shardRoutings != null) { - for (ShardRouting shardRouting : shardRoutings) { - if (shardRouting.primary()) { - if (shardRouting.active()) { - return shardRouting; - } else if (primary == null) { - primary = shardRouting; - } else if (primary.relocatingNodeId() != null) { - primary = shardRouting; + if (routing.isPeerRecovery()) { + List shardRoutings = assignedShards.get(routing.shardId()); + if (shardRoutings != null) { + for (ShardRouting shardRouting : shardRoutings) { + if (shardRouting.primary()) { + if (shardRouting.active()) { + return shardRouting; + } else if (primary == null) { + primary = shardRouting; + } else if (primary.relocatingNodeId() != null) { + primary = shardRouting; + } } } } @@ -500,7 +499,6 @@ public class RoutingNodes implements Iterable { ShardRouting relocationMarkerRemoved = shard.removeRelocationSource(); updateAssigned(shard, relocationMarkerRemoved); inactiveShardCount++; // relocation targets are not counted as inactive shards whereas initializing shards are - Recoveries.getOrAdd(recoveriesPerNode, shard.relocatingNodeId()).addOutgoing(-1); return relocationMarkerRemoved; } @@ -856,20 +854,17 @@ public class RoutingNodes implements Iterable { for 
(ShardRouting routing : routingNode) { if (routing.initializing()) { incoming++; - } else if (routing.relocating()) { - outgoing++; } - if (routing.primary() && (routing.initializing() && routing.relocatingNodeId() != null) == false) { // we don't count the initialization end of the primary relocation - List shardRoutings = routingNodes.assignedShards.get(routing.shardId()); - for (ShardRouting assigned : shardRoutings) { - if (assigned.primary() == false && assigned.initializing() && assigned.relocatingNodeId() == null) { + if (routing.primary() && routing.isPeerRecovery() == false) { + for (ShardRouting assigned : routingNodes.assignedShards.get(routing.shardId())) { + if (assigned.isPeerRecovery()) { outgoing++; } } } } } - assert incoming == value.incoming : incoming + " != " + value.incoming; + assert incoming == value.incoming : incoming + " != " + value.incoming + " node: " + routingNode; assert outgoing == value.outgoing : outgoing + " != " + value.outgoing + " node: " + routingNode; } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 45afd07e297..286b378debc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -28,6 +28,9 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import static org.elasticsearch.cluster.routing.allocation.decider.Decision.THROTTLE; +import static org.elasticsearch.cluster.routing.allocation.decider.Decision.YES; + /** * {@link ThrottlingAllocationDecider} controls the recovery process per node in * the cluster. 
It exposes two settings via the cluster update API that allow @@ -109,50 +112,83 @@ public class ThrottlingAllocationDecider extends AllocationDecider { @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { - if (shardRouting.primary()) { - assert shardRouting.unassigned() || shardRouting.active(); - if (shardRouting.unassigned()) { - // primary is unassigned, means we are going to do recovery from gateway - // count *just the primary* currently doing recovery on the node and check against concurrent_recoveries - int primariesInRecovery = 0; - for (ShardRouting shard : node) { - // when a primary shard is INITIALIZING, it can be because of *initial recovery* or *relocation from another node* - // we only count initial recoveries here, so we need to make sure that relocating node is null - if (shard.initializing() && shard.primary() && shard.relocatingNodeId() == null) { - primariesInRecovery++; - } + if (shardRouting.primary() && shardRouting.unassigned()) { + assert initializingShard(shardRouting, node.nodeId()).isPeerRecovery() == false; + // primary is unassigned, means we are going to do recovery from store, snapshot or local shards + // count *just the primaries* currently doing recovery on the node and check against primariesInitialRecoveries + + int primariesInRecovery = 0; + for (ShardRouting shard : node) { + // when a primary shard is INITIALIZING, it can be because of *initial recovery* or *relocation from another node* + // we only count initial recoveries here, so we need to make sure that relocating node is null + if (shard.initializing() && shard.primary() && shard.relocatingNodeId() == null) { + primariesInRecovery++; } - if (primariesInRecovery >= primariesInitialRecoveries) { - return allocation.decision(Decision.THROTTLE, NAME, "too many primaries are currently recovering [%d], limit: [%d]", - primariesInRecovery, primariesInitialRecoveries); + } + if (primariesInRecovery >= 
primariesInitialRecoveries) { + // TODO: Should index creation not be throttled for primary shards? + return allocation.decision(THROTTLE, NAME, "too many primaries are currently recovering [%d], limit: [%d]", + primariesInRecovery, primariesInitialRecoveries); + } else { + return allocation.decision(YES, NAME, "below primary recovery limit of [%d]", primariesInitialRecoveries); + } + } else { + // Peer recovery + assert initializingShard(shardRouting, node.nodeId()).isPeerRecovery(); + + // Allocating a shard to this node will increase the incoming recoveries + int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId()); + if (currentInRecoveries >= concurrentIncomingRecoveries) { + return allocation.decision(THROTTLE, NAME, "too many incoming shards are currently recovering [%d], limit: [%d]", + currentInRecoveries, concurrentIncomingRecoveries); + } else { + // search for corresponding recovery source (= primary shard) and check number of outgoing recoveries on that node + ShardRouting primaryShard = allocation.routingNodes().activePrimary(shardRouting.shardId()); + if (primaryShard == null) { + return allocation.decision(Decision.NO, NAME, "primary shard for this replica is not yet active"); + } + int primaryNodeOutRecoveries = allocation.routingNodes().getOutgoingRecoveries(primaryShard.currentNodeId()); + if (primaryNodeOutRecoveries >= concurrentOutgoingRecoveries) { + return allocation.decision(THROTTLE, NAME, "too many outgoing shards are currently recovering [%d], limit: [%d]", + primaryNodeOutRecoveries, concurrentOutgoingRecoveries); } else { - return allocation.decision(Decision.YES, NAME, "below primary recovery limit of [%d]", primariesInitialRecoveries); + return allocation.decision(YES, NAME, "below shard recovery limit of outgoing: [%d < %d] incoming: [%d < %d]", + primaryNodeOutRecoveries, + concurrentOutgoingRecoveries, + currentInRecoveries, + concurrentIncomingRecoveries); } } } - // TODO should we allow shards not 
allocated post API to always allocate? - // either primary or replica doing recovery (from peer shard) - - // count the number of recoveries on the node, its for both target (INITIALIZING) and source (RELOCATING) - return canAllocate(node, allocation); } - @Override - public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) { - int currentOutRecoveries = allocation.routingNodes().getOutgoingRecoveries(node.nodeId()); - int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId()); - if (currentOutRecoveries >= concurrentOutgoingRecoveries) { - return allocation.decision(Decision.THROTTLE, NAME, "too many outgoing shards are currently recovering [%d], limit: [%d]", - currentOutRecoveries, concurrentOutgoingRecoveries); - } else if (currentInRecoveries >= concurrentIncomingRecoveries) { - return allocation.decision(Decision.THROTTLE, NAME, "too many incoming shards are currently recovering [%d], limit: [%d]", - currentInRecoveries, concurrentIncomingRecoveries); - } else { - return allocation.decision(Decision.YES, NAME, "below shard recovery limit of outgoing: [%d < %d] incoming: [%d < %d]", - currentOutRecoveries, - concurrentOutgoingRecoveries, - currentInRecoveries, - concurrentIncomingRecoveries); + /** + * The shard routing passed to {@link #canAllocate(ShardRouting, RoutingNode, RoutingAllocation)} is not the initializing shard to this + * node but: + * - the unassigned shard routing in case if we want to assign an unassigned shard to this node. + * - the initializing shard routing if we want to assign the initializing shard to this node instead + * - the started shard routing in case if we want to check if we can relocate to this node. + * - the relocating shard routing if we want to relocate to this node now instead. + * + * This method returns the corresponding initializing shard that would be allocated to this node. 
+ */ + private ShardRouting initializingShard(ShardRouting shardRouting, String currentNodeId) { + final ShardRouting initializingShard; + if (shardRouting.unassigned()) { + initializingShard = shardRouting.initialize(currentNodeId, null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); + } else if (shardRouting.initializing()) { + initializingShard = shardRouting.moveToUnassigned(shardRouting.unassignedInfo()) + .initialize(currentNodeId, null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); + } else if (shardRouting.relocating()) { + initializingShard = shardRouting.cancelRelocation() + .relocate(currentNodeId, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE) + .buildTargetRelocatingShard(); + } else { + assert shardRouting.started(); + initializingShard = shardRouting.relocate(currentNodeId, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE) + .buildTargetRelocatingShard(); } + assert initializingShard.initializing(); + return initializingShard; } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java b/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java index 5d24b3d1e17..a470ca9f9ae 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RandomShardRoutingMutator.java @@ -34,7 +34,7 @@ public final class RandomShardRoutingMutator { public static ShardRouting randomChange(ShardRouting shardRouting, String[] nodes) { switch (randomInt(2)) { case 0: - if (shardRouting.unassigned() == false) { + if (shardRouting.unassigned() == false && shardRouting.primary() == false) { shardRouting = shardRouting.moveToUnassigned(new UnassignedInfo(randomReason(), randomAsciiOfLength(10))); } else if (shardRouting.unassignedInfo() != null) { shardRouting = shardRouting.updateUnassignedInfo(new UnassignedInfo(randomReason(), randomAsciiOfLength(10))); diff --git 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index 1ec1ed8c37d..307df91c302 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -54,7 +54,6 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase { * amount of iterations the test allows allocation unless the same shard is * already allocated on a node and balances the cluster to gain optimal * balance.*/ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/18701") public void testRandomDecisions() { RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(random()); AllocationService strategy = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java index 61a72bc352a..ada5f4c19f8 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java @@ -19,11 +19,13 @@ package org.elasticsearch.cluster.routing.allocation; +import com.carrotsearch.hppc.IntHashSet; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RestoreSource; import org.elasticsearch.cluster.routing.RoutingTable; import 
org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; @@ -31,6 +33,8 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.ESAllocationTestCase; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -57,9 +61,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1)) .build(); - RoutingTable routingTable = RoutingTable.builder() - .addAsNew(metaData.index("test")) - .build(); + RoutingTable routingTable = createRecoveryRoutingTable(metaData.index("test")); ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build(); @@ -118,9 +120,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1)) .build(); - RoutingTable routingTable = RoutingTable.builder() - .addAsNew(metaData.index("test")) - .build(); + RoutingTable routingTable = createRecoveryRoutingTable(metaData.index("test")); ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build(); @@ -188,9 +188,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(9).numberOfReplicas(0)) .build(); - RoutingTable routingTable = RoutingTable.builder() - 
.addAsNew(metaData.index("test")) - .build(); + RoutingTable routingTable = createRecoveryRoutingTable(metaData.index("test")); ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build(); @@ -242,89 +240,107 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1); } - public void testOutgoingThrottlesAllocaiton() { - Settings settings = Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 1) - .put("cluster.routing.allocation.node_initial_primaries_recoveries", 1) - .put("cluster.routing.allocation.cluster_concurrent_rebalance", 1) - .build(); - AllocationService strategy = createAllocationService(settings); + public void testOutgoingThrottlesAllocation() { + AllocationService strategy = createAllocationService(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_outgoing_recoveries", 1) + .build()); + + logger.info("Building initial routing table"); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(0)) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) .build(); - RoutingTable routingTable = RoutingTable.builder() - .addAsNew(metaData.index("test")) - .build(); + RoutingTable routingTable = createRecoveryRoutingTable(metaData.index("test")); ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build(); + logger.info("start one node, do reroute, only 1 should initialize"); + clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0)); - assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3)); - assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0)); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node1"), 1); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node2"), 1); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node3"), 1); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node2"), 0); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node3"), 0); + assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0)); + assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1)); + assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2)); + + logger.info("start initializing"); routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node1"), 0); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node2"), 0); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node3"), 0); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node2"), 0); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node3"), 0); + assertThat(routingTable.shardsWithState(STARTED).size(), 
equalTo(1)); + assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0)); + assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2)); - RoutingAllocation.Result reroute = strategy.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand("test", clusterState.getRoutingNodes().node("node1").iterator().next().shardId().id(), "node1", "node2")), false, false); - assertEquals(reroute.explanations().explanations().size(), 1); - assertEquals(reroute.explanations().explanations().get(0).decisions().type(), Decision.Type.YES); - routingTable = reroute.routingTable(); + logger.info("start one more node, first non-primary should start being allocated"); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node1"), 0); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node2"), 1); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node3"), 0); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node2"), 0); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node3"), 0); - // outgoing throttles - reroute = strategy.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand("test", clusterState.getRoutingNodes().node("node3").iterator().next().shardId().id(), "node3", "node1")), true, false); - assertEquals(reroute.explanations().explanations().size(), 1); - assertEquals(reroute.explanations().explanations().get(0).decisions().type(), Decision.Type.THROTTLE); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node1"), 0); - 
assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node2"), 1); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node3"), 0); + assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(1)); + assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1)); + assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(1)); assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node2"), 0); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node3"), 0); + + logger.info("start initializing non-primary"); + routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(2)); + assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0)); + assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(1)); + assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0); + + logger.info("start one more node, initializing second non-primary"); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(2)); assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1)); - assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(1)); assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0)); + assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1); - // incoming throttles - reroute = 
strategy.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand("test", clusterState.getRoutingNodes().node("node3").iterator().next().shardId().id(), "node3", "node2")), true, false); + logger.info("start one more node"); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node4"))).build(); + routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + + assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1); + + logger.info("move started non-primary to new node"); + RoutingAllocation.Result reroute = strategy.reroute(clusterState, new AllocationCommands( + new MoveAllocationCommand("test", 0, "node2", "node4")), true, false); assertEquals(reroute.explanations().explanations().size(), 1); assertEquals(reroute.explanations().explanations().get(0).decisions().type(), Decision.Type.THROTTLE); + // even though it is throttled, move command still forces allocation - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node1"), 0); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node2"), 1); - assertEquals(clusterState.getRoutingNodes().getIncomingRecoveries("node3"), 0); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node2"), 0); - assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node3"), 0); - assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(2)); - assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1)); + clusterState = ClusterState.builder(clusterState).routingResult(reroute).build(); + routingTable = clusterState.routingTable(); + assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(1)); assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(1)); + 
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2)); assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0)); - + assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 2); + assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node2"), 0); } + + private RoutingTable createRecoveryRoutingTable(IndexMetaData indexMetaData) { + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + switch (randomInt(5)) { + case 0: routingTableBuilder.addAsRecovery(indexMetaData); break; + case 1: routingTableBuilder.addAsFromCloseToOpen(indexMetaData); break; + case 2: routingTableBuilder.addAsFromDangling(indexMetaData); break; + case 3: routingTableBuilder.addAsNewRestore(indexMetaData, + new RestoreSource(new Snapshot("repo", new SnapshotId("snap", "randomId")), Version.CURRENT, + indexMetaData.getIndex().getName()), new IntHashSet()); break; + case 4: routingTableBuilder.addAsRestore(indexMetaData, + new RestoreSource(new Snapshot("repo", new SnapshotId("snap", "randomId")), Version.CURRENT, + indexMetaData.getIndex().getName())); break; + case 5: routingTableBuilder.addAsNew(indexMetaData); break; + default: throw new IndexOutOfBoundsException(); + } + + return routingTableBuilder.build(); + } + } From d55f719f8abe56a25f9d78999ef356a260599a78 Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Fri, 3 Jun 2016 16:37:28 +0200 Subject: [PATCH 10/39] [TEST] wait for yellow after setup doc tests (#18726) * [TEST] wait for yellow after setup doc tests We have many places in the doc where we expect and index to be yellow before we execute a query. Therefore we have to always wait for yellow after setup. 
--- .../doc/RestTestsFromSnippetsTask.groovy | 7 + docs/out | 11165 ++++++++++++++++ .../reference/query-dsl/nested-query.asciidoc | 1 - .../query-dsl/parent-id-query.asciidoc | 1 - 4 files changed, 11172 insertions(+), 2 deletions(-) create mode 100644 docs/out diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index 9f840df36e1..c9f5668a326 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -170,6 +170,13 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { current.println('---') current.println("setup:") body(setup) + // always wait for yellow before anything is executed + current.println( + " - do:\n" + + " raw:\n" + + " method: GET\n" + + " path: \"_cluster/health\"\n" + + " wait_for_status: \"yellow\"") } private void body(Snippet snippet) { diff --git a/docs/out b/docs/out new file mode 100644 index 00000000000..5a0af906e79 --- /dev/null +++ b/docs/out @@ -0,0 +1,11165 @@ +:buildSrc:compileJava UP-TO-DATE +:buildSrc:compileGroovy +:buildSrc:writeVersionProperties UP-TO-DATE +:buildSrc:processResources UP-TO-DATE +:buildSrc:classes +:buildSrc:jar +:buildSrc:assemble +:buildSrc:compileTestJava UP-TO-DATE +:buildSrc:compileTestGroovy UP-TO-DATE +:buildSrc:processTestResources UP-TO-DATE +:buildSrc:testClasses UP-TO-DATE +:buildSrc:test UP-TO-DATE +:buildSrc:check UP-TO-DATE +:buildSrc:build +======================================= +Elasticsearch Build Hamster says Hello! 
+======================================= + Gradle Version : 2.13 + OS Info : Linux 3.13.0-39-generic (amd64) + JDK Version : Oracle Corporation 1.8.0_65 [Java HotSpot(TM) 64-Bit Server VM 25.65-b01] + JAVA_HOME : /usr/java/jdk1.8.0_65 +:docs:clean +:docs:processTestResources UP-TO-DATE +:rest-api-spec:compileJava UP-TO-DATE +:rest-api-spec:processResources UP-TO-DATE +:rest-api-spec:classes UP-TO-DATE +:rest-api-spec:jar UP-TO-DATE +:docs:copyRestSpec +:docs:buildRestTests +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet 
+handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet 
+handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "title": { + "type": "text", + "analyzer": "standard" + } + } + } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "my_number": { + "type": "long", + "fields": { + "keyword": { + "type": "keyword" + } + } + } + } + } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "analyzer": "whitespace", + "text": "The quick brown fox." 
+} +emit snippet +body part: +{ + "tokenizer": "standard", + "filter": [ "lowercase", "asciifolding" ], + "text": "Is this déja vu?" +} +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "std_folded": { + "type": "custom", + "tokenizer": "standard", + "filter": [ + "lowercase", + "asciifolding" + ] + } + } + } + }, + "mappings": { + "my_type": { + "properties": { + "my_text": { + "type": "text", + "analyzer": "std_folded" + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "std_folded", + "text": "Is this déjà vu?" +} +emit snippet +body part: +{ + "field": "my_text", + "text": "Is this déjà vu?" +} +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer": "classic", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "my_tokenizer" + } + }, + "tokenizer": { + "my_tokenizer": { + "type": "classic", + "max_token_length": 5 + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer": "whitespace", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." 
+} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer": "ngram", + "text": "Quick Fox" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "my_tokenizer" + } + }, + "tokenizer": { + "my_tokenizer": { + "type": "ngram", + "min_gram": 3, + "max_gram": 3, + "token_chars": [ + "letter", + "digit" + ] + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": "2 Quick Foxes." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer": "letter", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer": "pattern", + "text": "The foo_bar_size's default is 5." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "my_tokenizer" + } + }, + "tokenizer": { + "my_tokenizer": { + "type": "pattern", + "pattern": "," + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": "comma,separated,values" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "my_tokenizer" + } + }, + "tokenizer": { + "my_tokenizer": { + "type": "pattern", + "pattern": "\"((?:\\\\\"|[^\"]|\\\\\")+)\"", + "group": 1 + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": "\"value\", \"value with embedded \\\" quote\"" +} +handle snippet +handle snippet +handle snippet +test snippet +emit 
snippet +body part: +{ + "tokenizer": "edge_ngram", + "text": "Quick Fox" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "my_tokenizer" + } + }, + "tokenizer": { + "my_tokenizer": { + "type": "edge_ngram", + "min_gram": 2, + "max_gram": 10, + "token_chars": [ + "letter", + "digit" + ] + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": "2 Quick Foxes." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "autocomplete": { + "tokenizer": "autocomplete", + "filter": [ + "lowercase" + ] + }, + "autocomplete_search": { + "tokenizer": "lowercase" + } + }, + "tokenizer": { + "autocomplete": { + "type": "edge_ngram", + "min_gram": 2, + "max_gram": 10, + "token_chars": [ + "letter" + ] + } + } + } + }, + "mappings": { + "doc": { + "properties": { + "title": { + "type": "text", + "analyzer": "autocomplete", + "search_analyzer": "autocomplete_search" + } + } + } + } +} +emit snippet +body part: +{ + "title": "Quick Foxes" +} +emit snippet +emit snippet +body part: +{ + "query": { + "match": { + "title": { + "query": "Quick Fo", + "operator": "and" + } + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer": "uax_url_email", + "text": "Email me at john.smith@global-international.com" +} +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "my_tokenizer" + } + }, + "tokenizer": { + "my_tokenizer": { + "type": "uax_url_email", + "max_token_length": 5 + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": 
"john.smith@global-international.com" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer": "path_hierarchy", + "text": "/one/two/three" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "my_tokenizer" + } + }, + "tokenizer": { + "my_tokenizer": { + "type": "path_hierarchy", + "delimiter": "-", + "replacement": "/", + "skip": 2 + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": "one-two-three-four-five" +} +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer": "standard", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "my_tokenizer" + } + }, + "tokenizer": { + "my_tokenizer": { + "type": "standard", + "max_token_length": 5 + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer": "keyword", + "text": "New York" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer": "thai", + "text": "การที่ได้ต้องแสดงว่างานดี" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer": "lowercase", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." 
+} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "analyzer": "standard", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_english_analyzer": { + "type": "standard", + "max_token_length": 5, + "stopwords": "_english_" + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_english_analyzer", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "analyzer": "simple", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "analyzer": "whitespace", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "analyzer": "keyword", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." 
+} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "std_english": { + "type": "standard", + "stopwords": "_english_" + } + } + } + }, + "mappings": { + "my_type": { + "properties": { + "my_text": { + "type": "text", + "analyzer": "standard", + "fields": { + "english": { + "type": "text", + "analyzer": "std_english" + } + } + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "field": "my_text", + "text": "The old brown cow" +} +emit snippet +body part: +{ + "field": "my_text.english", + "text": "The old brown cow" +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "analyzer": "stop", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_stop_analyzer": { + "type": "stop", + "stopwords": ["the", "over"] + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_stop_analyzer", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." 
+} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_custom_analyzer": { + "type": "custom", + "tokenizer": "standard", + "char_filter": [ + "html_strip" + ], + "filter": [ + "lowercase", + "asciifolding" + ] + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_custom_analyzer", + "text": "Is this déjà vu?" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_custom_analyzer": { + "type": "custom", + "char_filter": [ + "emoticons" + ], + "tokenizer": "punctuation", + "filter": [ + "lowercase", + "english_stop" + ] + } + }, + "tokenizer": { + "punctuation": { + "type": "pattern", + "pattern": "[ .,!?]" + } + }, + "char_filter": { + "emoticons": { + "type": "mapping", + "mappings": [ + ":) => _happy_", + ":( => _sad_" + ] + } + }, + "filter": { + "english_stop": { + "type": "stop", + "stopwords": "_english_" + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_custom_analyzer", + "text": "I'm a :) person, and you?" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "analyzer": "pattern", + "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." 
+} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_email_analyzer": { + "type": "pattern", + "pattern": "\\W|_", + "lowercase": true + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_email_analyzer", + "text": "John_Smith@foo-bar.com" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "camel": { + "type": "pattern", + "pattern": "([^\\p{L}\\d]+)|(?<=\\D)(?=\\d)|(?<=\\d)(?=\\D)|(?<=[\\p{L}&&[^\\p{Lu}]])(?=\\p{Lu})|(?<=\\p{Lu})(?=\\p{Lu}[\\p{L}&&[^\\p{Lu}]])" + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "camel", + "text": "MooseX::FTPClass2_beta" +} +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "analyzer": "fingerprint", + "text": "Yes yes, Gödel said this sentence is consistent and." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_fingerprint_analyzer": { + "type": "fingerprint", + "stopwords": "_english_" + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_fingerprint_analyzer", + "text": "Yes yes, Gödel said this sentence is consistent and." +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer": "keyword", + "char_filter": [ "html_strip" ], + "text": "

I'm so happy!

" +} +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "keyword", + "char_filter": ["my_char_filter"] + } + }, + "char_filter": { + "my_char_filter": { + "type": "html_strip", + "escaped_tags": ["b"] + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": "

I'm so happy!

" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "keyword", + "char_filter": [ + "my_char_filter" + ] + } + }, + "char_filter": { + "my_char_filter": { + "type": "mapping", + "mappings": [ + "٠ => 0", + "١ => 1", + "٢ => 2", + "٣ => 3", + "٤ => 4", + "٥ => 5", + "٦ => 6", + "٧ => 7", + "٨ => 8", + "٩ => 9" + ] + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": "My license plate is ٢٥٠١٥" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "standard", + "char_filter": [ + "my_char_filter" + ] + } + }, + "char_filter": { + "my_char_filter": { + "type": "mapping", + "mappings": [ + ":) => _happy_", + ":( => _sad_" + ] + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": "I'm delighted about it :(" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "standard", + "char_filter": [ + "my_char_filter" + ] + } + }, + "char_filter": { + "my_char_filter": { + "type": "pattern_replace", + "pattern": "(\\d+)-(?=\\d)", + "replacement": "$1_" + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": "My credit card is 123-456-789" +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "tokenizer": "standard", + "char_filter": [ + "my_char_filter" + ], + "filter": [ + "lowercase" + ] + } + }, + "char_filter": { + "my_char_filter": { + "type": "pattern_replace", + "pattern": 
"(?<=\\p{Lower})(?=\\p{Upper})", + "replacement": " " + } + } + } + }, + "mappings": { + "my_type": { + "properties": { + "text": { + "type": "text", + "analyzer": "my_analyzer" + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "analyzer": "my_analyzer", + "text": "The fooBarBaz method" +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +query part: refresh +body part: +{ + "text": "The fooBarBaz method" +} +emit snippet +body part: +{ + "query": { + "match": { + "text": "bar" + } + }, + "highlight": { + "fields": { + "text": {} + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match_all": {} + }, + "fielddata_fields" : ["test1", "test2"] +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "from" : 0, "size" : 10, + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "_source": false, + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "_source": "obj.*", + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "_source": [ "obj1.*", "obj2.*" ], + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "_source": { + "include": [ "obj1.*", "obj2.*" ], + "exclude": [ "*.description" ] + }, + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "explain": true, + "query" 
: { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match_all": {} + }, + "script_fields" : { + "test1" : { + "script" : "doc['my_field_name'].value * 2" + }, + "test2" : { + "script" : { + "inline": "doc['my_field_name'].value * factor", + "params" : { + "factor" : 2.0 + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: + { + "query" : { + "match_all": {} + }, + "script_fields" : { + "test1" : { + "script" : "_source.obj1.obj2" + } + } + } +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match": { "user": "kimchy" } + }, + "highlight" : { + "fields" : { + "content" : {} + } + } +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match": { "user": "kimchy" } + }, + "highlight" : { + "fields" : { + "content" : {"type" : "plain"} + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match": { "user": "kimchy" } + }, + "highlight" : { + "fields" : { + "content" : {"force_source" : true} + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match": { "user": "kimchy" } + }, + "highlight" : { + "pre_tags" : [""], + "post_tags" : [""], + "fields" : { + "_all" : {} + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match": { "user": "kimchy" } + }, + "highlight" : { + "pre_tags" : ["", ""], + "post_tags" : ["", ""], + "fields" : { + "_all" : {} + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match": { "user": "kimchy" } + }, + "highlight" : { + "tags_schema" : "styled", + "fields" : { + "content" : {} + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match": { "user": "kimchy" } + }, + "highlight" : { + "fields" : { + "content" : {"fragment_size" : 150, "number_of_fragments" : 3} + } + } +} +handle snippet 
+test snippet +emit snippet +body part: +{ + "query" : { + "match": { "user": "kimchy" } + }, + "highlight" : { + "order" : "score", + "fields" : { + "content" : {"fragment_size" : 150, "number_of_fragments" : 3} + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match": { "user": "kimchy" } + }, + "highlight" : { + "fields" : { + "_all" : {}, + "bio.title" : {"number_of_fragments" : 0} + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match": { "user": "kimchy" } + }, + "highlight" : { + "fields" : { + "content" : { + "fragment_size" : 150, + "number_of_fragments" : 3, + "no_match_size": 150 + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "fields": [ "_id" ], + "query" : { + "match": { + "content": { + "query": "foo bar" + } + } + }, + "rescore": { + "window_size": 50, + "query": { + "rescore_query" : { + "match_phrase": { + "content": { + "query": "foo bar", + "phrase_slop": 1 + } + } + }, + "rescore_query_weight" : 10 + } + }, + "highlight" : { + "order" : "score", + "fields" : { + "content" : { + "fragment_size" : 150, + "number_of_fragments" : 3, + "highlight_query": { + "bool": { + "must": { + "match": { + "content": { + "query": "foo bar" + } + } + }, + "should": { + "match_phrase": { + "content": { + "query": "foo bar", + "phrase_slop": 1, + "boost": 10.0 + } + } + }, + "minimum_should_match": 0 + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match": { "user": "kimchy" } + }, + "highlight" : { + "number_of_fragments" : 3, + "fragment_size" : 150, + "fields" : { + "_all" : { "pre_tags" : [""], "post_tags" : [""] }, + "bio.title" : { "number_of_fragments" : 0 }, + "bio.author" : { "number_of_fragments" : 0 }, + "bio.content" : { "number_of_fragments" : 5, "order" : "score" } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match": { "user": "kimchy" } + }, + "highlight" : 
{ + "require_field_match": false, + "fields": { + "_all" : { "pre_tags" : [""], "post_tags" : [""] } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "query_string": { + "query": "content.plain:running scissors", + "fields": ["content"] + } + }, + "highlight": { + "order": "score", + "fields": { + "content": { + "matched_fields": ["content", "content.plain"], + "type" : "fvh" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "query_string": { + "query": "running scissors", + "fields": ["content", "content.plain^10"] + } + }, + "highlight": { + "order": "score", + "fields": { + "content": { + "matched_fields": ["content", "content.plain"], + "type" : "fvh" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "query_string": { + "query": "running scissors", + "fields": ["content", "content.plain^10"] + } + }, + "highlight": { + "order": "score", + "fields": { + "content": { + "matched_fields": ["content.plain"], + "type" : "fvh" + } + } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "min_score": 0.5, + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +query part: preference=xyzabc123 +body part: +{ + "query": { + "match": { + "title": "elasticsearch" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "should" : [ + {"match" : { "name.first" : {"query" : "shay", "_name" : "first"} }}, + {"match" : { "name.last" : {"query" : "banon", "_name" : "last"} }} + ], + "filter" : { + "terms" : { + "name.last" : ["banon", "kimchy"], + "_name" : "test" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "indices_boost" : { + "index1" : 1.4, + "index2" : 1.3 + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + 
"post_date": { "type": "date" }, + "user": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "age": { "type": "integer" } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "sort" : [ + { "post_date" : {"order" : "asc"}}, + "user", + { "name" : "desc" }, + { "age" : "desc" }, + "_score" + ], + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +query part: refresh +body part: +{ + "product": "chocolate", + "price": [20, 4] +} +emit snippet +body part: +{ + "query" : { + "term" : { "product" : "chocolate" } + }, + "sort" : [ + {"price" : {"order" : "asc", "mode" : "avg"}} + ] +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "term" : { "product" : "chocolate" } + }, + "sort" : [ + { + "offer.price" : { + "mode" : "avg", + "order" : "asc", + "nested_path" : "offer", + "nested_filter" : { + "term" : { "offer.color" : "blue" } + } + } + } + ] +} +handle snippet +test snippet +emit snippet +body part: +{ + "sort" : [ + { "price" : {"missing" : "_last"} } + ], + "query" : { + "term" : { "product" : "chocolate" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "sort" : [ + { "price" : {"unmapped_type" : "long"} } + ], + "query" : { + "term" : { "product" : "chocolate" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "sort" : [ + { + "_geo_distance" : { + "pin.location" : [-70, 40], + "order" : "asc", + "unit" : "km", + "mode" : "min", + "distance_type" : "sloppy_arc" + } + } + ], + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "sort" : [ + { + "_geo_distance" : { + "pin.location" : { + "lat" : 40, + "lon" : -70 + }, + "order" : "asc", + "unit" : "km" + } + } + ], + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "sort" : [ + { + "_geo_distance" : { + "pin.location" : "40,-70", + "order" : 
"asc", + "unit" : "km" + } + } + ], + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "sort" : [ + { + "_geo_distance" : { + "pin.location" : "drm3btev3e86", + "order" : "asc", + "unit" : "km" + } + } + ], + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "sort" : [ + { + "_geo_distance" : { + "pin.location" : [-70, 40], + "order" : "asc", + "unit" : "km" + } + } + ], + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "term" : { "user" : "kimchy" } + }, + "sort" : { + "_script" : { + "type" : "number", + "script" : { + "inline": "doc['field_name'].value * factor", + "params" : { + "factor" : 1.1 + } + }, + "order" : "asc" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "track_scores": true, + "sort" : [ + { "post_date" : {"order" : "desc"} }, + { "name" : "desc" }, + { "age" : "desc" } + ], + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "version": true, + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "fields" : ["user", "postDate"], + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "fields" : [], + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +null +emit snippet +body part: +{ + "mappings": { + "item": { + "properties": { + "brand": { "type": "keyword"}, + "color": { "type": "keyword"}, + "model": { "type": "keyword"} + } + } + } +} +emit snippet +query part: refresh +body part: +{ + "brand": "gucci", + "color": "red", + "model": "slim" +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "filter": [ + { "term": { "color": "red" }}, + { 
"term": { "brand": "gucci" }} + ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "filter": [ + { "term": { "color": "red" }}, + { "term": { "brand": "gucci" }} + ] + } + }, + "aggs": { + "models": { + "terms": { "field": "model" } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "filter": { + "term": { "brand": "gucci" } + } + } + }, + "aggs": { + "colors": { + "terms": { "field": "color" } + }, + "color_red": { + "filter": { + "term": { "color": "red" } + }, + "aggs": { + "models": { + "terms": { "field": "model" } + } + } + } + }, + "post_filter": { + "term": { "color": "red" } + } +} +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +body part: +{ + "size": 10, + "query": { + "match" : { + "title" : "elasticsearch" + } + }, + "sort": [ + {"date": "asc"}, + {"_uid": "desc"} + ] +} +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + 
{"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +body part: +{ + "size": 10, + "query": { + "match" : { + "title" : "elasticsearch" + } + }, + "search_after": [1463538857, "tweet#654323"], + "sort": [ + {"date": "asc"}, + {"_uid": "desc"} + ] +} +handle snippet +null +emit snippet +query part: refresh +body part: +{"index":{"_id":1}} +{"user" : "kimchy", "post_date" : "2009-11-15T14:12:12", "message" : "trying out Elasticsearch"} +{"index":{"_id":2}} +{"user" : "kimchi", "post_date" : "2009-11-15T14:12:13", "message" : "My username is similar to @kimchy!"} +handle snippet +test snippet +emit snippet +query part: q=user:foo +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "bool" : { + "must" : { + "query_string" : { + "query" : "*:*" + } + }, + "filter" : { + "term" : { "user" : "kimchy" } + } + } + } +} +handle snippet +test snippet +emit snippet +query part: q=post_date:foo +handle snippet +handle snippet +test snippet +emit snippet +query part: q=post_date:foo&explain=true +handle snippet +handle snippet +test snippet +emit snippet +query part: rewrite=true +body part: +{ + "query": { + "match": { + "user": { + "query": "kimchy", + "fuzziness": "auto" + } + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +query part: rewrite=true +body part: +{ + "query": { + "more_like_this": { + "like": { + "_id": "2" + }, + "boost_terms": 1 + } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet 
+handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", 
"likes": 4} +emit snippet +query part: q=user:kimchy +handle snippet +handle snippet +test snippet +emit snippet +query part: refresh +body part: +{ + "user": "kimchy" +} +emit snippet +query part: q=user:kimchy +emit snippet +body part: +{ + "query" : { + "term" : { "user" : "kimchy" } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "multi_match" : { + "query": "this is a test", + "fields": [ "subject", "message" ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "multi_match" : { + "query": "Will Smith", + "fields": [ "title", "*_name" ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "multi_match" : { + "query" : "this is a test", + "fields" : [ "subject^3", "message" ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + 
"multi_match" : { + "query": "brown fox", + "type": "best_fields", + "fields": [ "subject", "message" ], + "tie_breaker": 0.3 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "dis_max": { + "queries": [ + { "match": { "subject": "brown fox" }}, + { "match": { "message": "brown fox" }} + ], + "tie_breaker": 0.3 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "multi_match" : { + "query": "Will Smith", + "type": "best_fields", + "fields": [ "first_name", "last_name" ], + "operator": "and" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "multi_match" : { + "query": "quick brown fox", + "type": "most_fields", + "fields": [ "title", "title.original", "title.shingles" ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "should": [ + { "match": { "title": "quick brown fox" }}, + { "match": { "title.original": "quick brown fox" }}, + { "match": { "title.shingles": "quick brown fox" }} + ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "multi_match" : { + "query": "quick brown f", + "type": "phrase_prefix", + "fields": [ "subject", "message" ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "dis_max": { + "queries": [ + { "match_phrase_prefix": { "subject": "quick brown f" }}, + { "match_phrase_prefix": { "message": "quick brown f" }} + ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "multi_match" : { + "query": "Will Smith", + "type": "cross_fields", + "fields": [ "first_name", "last_name" ], + "operator": "and" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "multi_match" : { + "query": "Jon", + "type": "cross_fields", + "fields": [ + "first", "first.edge", + "last", "last.edge" + ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + 
"bool": { + "should": [ + { + "multi_match" : { + "query": "Will Smith", + "type": "cross_fields", + "fields": [ "first", "last" ], + "minimum_should_match": "50%" + } + }, + { + "multi_match" : { + "query": "Will Smith", + "type": "cross_fields", + "fields": [ "*.edge" ] + } + } + ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "multi_match" : { + "query": "Jon", + "type": "cross_fields", + "analyzer": "standard", + "fields": [ "first", "last", "*.edge" ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "query_string" : { + "default_field" : "content", + "query" : "this AND that OR thus" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "query_string" : { + "fields" : ["content", "name"], + "query" : "this AND that" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "query_string": { + "query": "(content:this OR name:this) AND (content:that OR name:that)" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "query_string" : { + "fields" : ["content", "name^5"], + "query" : "this AND that OR thus", + "use_dis_max" : true + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "query_string" : { + "fields" : ["city.*"], + "query" : "this AND that OR thus", + "use_dis_max" : true + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "query_string" : { + "fields" : ["content", "name.*^5"], + "query" : "this AND that OR thus", + "use_dis_max" : true + } + } +} +handle snippet +null +emit snippet +body part: +{ + "mappings": { + "location": { + "properties": { + "pin": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } + } + } +} +emit snippet +body part: +{ + "pin" : { + "location" : { + "lat" : 40.12, + "lon" : -71.34 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + 
"must" : { + "match_all" : {} + }, + "filter" : { + "geo_distance" : { + "distance" : "200km", + "pin.location" : { + "lat" : 40, + "lon" : -70 + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_distance" : { + "distance" : "12km", + "pin.location" : { + "lat" : 40, + "lon" : -70 + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_distance" : { + "distance" : "12km", + "pin.location" : [-70, 40] + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_distance" : { + "distance" : "12km", + "pin.location" : "40,-70" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_distance" : { + "distance" : "12km", + "pin.location" : "drm3btev3e86" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match_all": {} + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match_all": { "boost" : 1.2 } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match_none": {} + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "more_like_this" : { + "fields" : ["title", "description"], + "like" : "Once upon a time", + "min_term_freq" : 1, + "max_query_terms" : 12 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "more_like_this" : { + "fields" : ["title", "description"], + "like" : [ + { + "_index" : "imdb", + "_type" : "movies", + "_id" : "1" + }, + { + "_index" : "imdb", + "_type" : "movies", + "_id" : "2" + }, + "and potentially some more text here as well" + ], + "min_term_freq" : 1, + 
"max_query_terms" : 12 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "more_like_this" : { + "fields" : ["name.first", "name.last"], + "like" : [ + { + "_index" : "marvel", + "_type" : "quotes", + "doc" : { + "name": { + "first": "Ben", + "last": "Grimm" + }, + "tweet": "You got no idea what I'd... what I'd give to be invisible." + } + }, + { + "_index" : "marvel", + "_type" : "quotes", + "_id" : "2" + } + ], + "min_term_freq" : 1, + "max_query_terms" : 12 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "movies": { + "properties": { + "title": { + "type": "text", + "term_vector": "yes" + }, + "description": { + "type": "text" + }, + "tags": { + "type": "text", + "fields" : { + "raw": { + "type" : "text", + "analyzer": "keyword", + "term_vector" : "yes" + } + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "indices" : { + "indices" : ["index1", "index2"], + "query" : { "term" : { "tag" : "wow" } }, + "no_match_query" : { "term" : { "tag" : "kow" } } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "range" : { + "age" : { + "gte" : 10, + "lte" : 20, + "boost" : 2.0 + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "range" : { + "date" : { + "gte" : "now-1d/d", + "lt" : "now/d" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "range" : { + "born" : { + "gte": "01/01/2012", + "lte": "2013", + "format": "dd/MM/yyyy||yyyy" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "range" : { + "timestamp" : { + "gte": "2015-01-01 00:00:00", + "lte": "now", + "time_zone": "+01:00" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "common": { + "body": { + "query": "this is bonsai cool", + "cutoff_frequency": 0.001 + } + } + } +} +handle snippet +test snippet +emit 
snippet +body part: +{ + "query": { + "common": { + "body": { + "query": "nelly the elephant as a cartoon", + "cutoff_frequency": 0.001, + "low_freq_operator": "and" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "must": [ + { "term": { "body": "nelly"}}, + { "term": { "body": "elephant"}}, + { "term": { "body": "cartoon"}} + ], + "should": [ + { "term": { "body": "the"}}, + { "term": { "body": "as"}}, + { "term": { "body": "a"}} + ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "common": { + "body": { + "query": "nelly the elephant as a cartoon", + "cutoff_frequency": 0.001, + "minimum_should_match": 2 + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "must": { + "bool": { + "should": [ + { "term": { "body": "nelly"}}, + { "term": { "body": "elephant"}}, + { "term": { "body": "cartoon"}} + ], + "minimum_should_match": 2 + } + }, + "should": [ + { "term": { "body": "the"}}, + { "term": { "body": "as"}}, + { "term": { "body": "a"}} + ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "common": { + "body": { + "query": "nelly the elephant not as a cartoon", + "cutoff_frequency": 0.001, + "minimum_should_match": { + "low_freq" : 2, + "high_freq" : 3 + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "must": { + "bool": { + "should": [ + { "term": { "body": "nelly"}}, + { "term": { "body": "elephant"}}, + { "term": { "body": "cartoon"}} + ], + "minimum_should_match": 2 + } + }, + "should": { + "bool": { + "should": [ + { "term": { "body": "the"}}, + { "term": { "body": "not"}}, + { "term": { "body": "as"}}, + { "term": { "body": "a"}} + ], + "minimum_should_match": 3 + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "common": { + "body": { + "query": "how not to be", + "cutoff_frequency": 0.001, + 
"minimum_should_match": { + "low_freq" : 2, + "high_freq" : 3 + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "should": [ + { "term": { "body": "how"}}, + { "term": { "body": "not"}}, + { "term": { "body": "to"}}, + { "term": { "body": "be"}} + ], + "minimum_should_match": "3<50%" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "span_within" : { + "little" : { + "span_term" : { "field1" : "foo" } + }, + "big" : { + "span_near" : { + "clauses" : [ + { "span_term" : { "field1" : "bar" } }, + { "span_term" : { "field1" : "baz" } } + ], + "slop" : 5, + "in_order" : true + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "span_near" : { + "clauses" : [ + { "span_term" : { "field" : "value1" } }, + { "span_term" : { "field" : "value2" } }, + { "span_term" : { "field" : "value3" } } + ], + "slop" : 12, + "in_order" : false + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_distance_range" : { + "from" : "200km", + "to" : "400km", + "pin.location" : { + "lat" : 40, + "lon" : -70 + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "simple_query_string" : { + "query": "\"fried eggs\" +(eggplant | potato) -frittata", + "analyzer": "snowball", + "fields": ["body^5","_all"], + "default_operator": "and" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "simple_query_string" : { + "fields" : ["content", "name.*^5"], + "query" : "foo bar baz" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "simple_query_string" : { + "query" : "foo | bar + baz*", + "flags" : "OR|AND|PREFIX" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "span_multi":{ + "match":{ + "prefix" : { "user" : { "value" : "ki" } } + 
} + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "span_multi":{ + "match":{ + "prefix" : { "user" : { "value" : "ki", "boost" : 1.08 } } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "constant_score" : { + "filter" : { + "term" : { "user" : "kimchy"} + }, + "boost" : 1.2 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "wildcard" : { "user" : "ki*y" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "wildcard" : { "user" : { "value" : "ki*y", "boost" : 2.0 } } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "wildcard" : { "user" : { "wildcard" : "ki*y", "boost" : 2.0 } } + } +} +handle snippet +null +emit snippet +body part: +{ + "mappings": { + "type1" : { + "properties" : { + "obj1" : { + "type" : "nested" + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "nested" : { + "path" : "obj1", + "score_mode" : "avg", + "query" : { + "bool" : { + "must" : [ + { "match" : {"obj1.name" : "blue"} }, + { "range" : {"obj1.count" : {"gt" : 5}} } + ] + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "term" : { "user" : "Kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "should": [ + { + "term": { + "status": { + "value": "urgent", + "boost": 2.0 + } + } + }, + { + "term": { + "status": "normal" + } + } + ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "full_text": { + "type": "text" + }, + "exact_value": { + "type": "keyword" + } + } + } + } +} +emit snippet +body part: +{ + "full_text": "Quick Foxes!", + "exact_value": "Quick Foxes!" 
+} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "term": { + "exact_value": "Quick Foxes!" + } + } +} +emit snippet +body part: +{ + "query": { + "term": { + "full_text": "Quick Foxes!" + } + } +} +emit snippet +body part: +{ + "query": { + "term": { + "full_text": "foxes" + } + } +} +emit snippet +body part: +{ + "query": { + "match": { + "full_text": "Quick Foxes!" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match" : { + "message" : "this is a test" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match" : { + "message" : { + "query" : "this is a test", + "operator" : "and" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match" : { + "message" : { + "query" : "to be or not to be", + "operator" : "and", + "zero_terms_query": "all" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match" : { + "message" : { + "query" : "to be or not to be", + "cutoff_frequency" : 0.001 + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "must": [ + { "match": { "title": "Search" }}, + { "match": { "content": "Elasticsearch" }} + ], + "filter": [ + { "term": { "status": "published" }}, + { "range": { "publish_date": { "gte": "2015-01-01" }}} + ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "ids" : { + "type" : "my_type", + "values" : ["1", "4", "100"] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "dis_max" : { + "tie_breaker" : 0.7, + "boost" : 1.2, + "queries" : [ + { + "term" : { "age" : 34 } + }, + { + "term" : { "age" : 35 } + } + ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match_phrase" : { + "message" : "this is a test" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match_phrase" : { 
+ "message" : { + "query" : "this is a test", + "analyzer" : "my_analyzer" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "script" : { + "script" : "doc['num1'].value > 1" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "script" : { + "script" : { + "inline" : "doc['num1'].value > param1", + "params" : { + "param1" : 5 + } + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ "query": { + "prefix" : { "user" : "ki" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ "query": { + "prefix" : { "user" : { "value" : "ki", "boost" : 2.0 } } + } +} +handle snippet +test snippet +emit snippet +body part: +{ "query": { + "prefix" : { "user" : { "prefix" : "ki", "boost" : 2.0 } } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "term" : { "user" : "kimchy" } + }, + "filter": { + "term" : { "tag" : "tech" } + }, + "must_not" : { + "range" : { + "age" : { "from" : 10, "to" : 20 } + } + }, + "should" : [ + { "term" : { "tag" : "wow" } }, + { "term" : { "tag" : "elasticsearch" } } + ], + "minimum_should_match" : 1, + "boost" : 1.0 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "filter": { + "term": { + "status": "active" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "must": { + "match_all": {} + }, + "filter": { + "term": { + "status": "active" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "constant_score": { + "filter": { + "term": { + "status": "active" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "span_containing" : { + "little" : { + "span_term" : { "field1" : "foo" } + }, + "big" : { + "span_near" : { + "clauses" : [ + { "span_term" : { 
"field1" : "bar" } }, + { "span_term" : { "field1" : "baz" } } + ], + "slop" : 5, + "in_order" : true + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "boosting" : { + "positive" : { + "term" : { + "field1" : "value1" + } + }, + "negative" : { + "term" : { + "field2" : "value2" + } + }, + "negative_boost" : 0.2 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "has_child" : { + "type" : "blog_tag", + "query" : { + "term" : { + "tag" : "something" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "has_child" : { + "type" : "blog_tag", + "score_mode" : "min", + "query" : { + "term" : { + "tag" : "something" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "has_child" : { + "type" : "blog_tag", + "score_mode" : "min", + "min_children": 2, + "max_children": 10, + "query" : { + "term" : { + "tag" : "something" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "constant_score" : { + "filter" : { + "terms" : { "user" : ["kimchy", "elasticsearch"]} + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "followers" : ["1", "3"] +} +emit snippet +body part: +{ + "user" : "1" +} +emit snippet +body part: +{ + "query" : { + "terms" : { + "user" : { + "index" : "users", + "type" : "user", + "id" : "2", + "path" : "followers" + } + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "exists" : { "field" : "user" } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "must_not": { + "exists": { + "field": "user" + } + } + } + } +} +handle snippet +null +emit snippet +body part: +{ + "mappings" : { + "location": { + "properties": { + "pin": { + "type": "geo_point", + "geohash": true, + 
"geohash_prefix": true, + "geohash_precision": 10 + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geohash_cell": { + "pin": { + "lat": 13.4080, + "lon": 52.5186 + }, + "precision": 3, + "neighbors": true + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "regexp":{ + "name.first": "s.*y" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "regexp":{ + "name.first":{ + "value":"s.*y", + "boost":1.2 + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "regexp":{ + "name.first": { + "value": "s.*y", + "flags" : "INTERSECTION|COMPLEMENT|EMPTY" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "regexp":{ + "name.first": { + "value": "s.*y", + "flags" : "INTERSECTION|COMPLEMENT|EMPTY", + "max_determinized_states": 20000 + } + } + } +} +handle snippet +null +emit snippet +body part: +{ + "mappings": { + "blog_post": { + "properties": { + "name": { + "type": "keyword" + } + } + }, + "blog_tag": { + "_parent": { + "type": "blog_post" + }, + "_routing": { + "required": true + } + } + } +} +emit snippet +query part: wait_for_status=yellow +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "parent_id" : { + "type" : "blog_tag", + "id" : "1" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "has_parent": { + "type": "blog_post", + "query": { + "term": { + "_id": "1" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "has_parent" : { + "parent_type" : "blog", + "query" : { + "term" : { + "tag" : "something" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "has_parent" : { + "parent_type" : "blog", + "score" : true, + "query" : { + "term" : { + "tag" : "something" + } + } + } + } +} 
+handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "template": { + "inline": { "match": { "text": "{{query_string}}" }}, + "params" : { + "query_string" : "all about search" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match": { + "text": "all about search" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "template": { + "inline": "{ \"match\": { \"text\": \"{{query_string}}\" }}", + "params" : { + "query_string" : "all about search" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "template": { + "file": "my_template", + "params" : { + "query_string" : "all about search" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "template": { "match": { "text": "{{query_string}}" }} +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "template": { + "id": "my_template", + "params" : { + "query_string" : "all about search" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "span_first" : { + "match" : { + "span_term" : { "user" : "kimchy" } + }, + "end" : 3 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_polygon" : { + "person.location" : { + "points" : [ + {"lat" : 40, "lon" : -70}, + {"lat" : 30, "lon" : -80}, + {"lat" : 20, "lon" : -90} + ] + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_polygon" : { + "person.location" : { + "points" : [ + [-70, 40], + [-80, 30], + [-90, 20] + ] + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_polygon" : { + "person.location" : { + "points" : [ + "40, -70", + "30, -80", 
+ "20, -90" + ] + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_polygon" : { + "person.location" : { + "points" : [ + "drn5x1g8cu2y", + "30, -80", + "20, -90" + ] + } + } + } + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query":{ + "bool": { + "must": { + "match_all": {} + }, + "filter": { + "geo_shape": { + "location": { + "shape": { + "type": "envelope", + "coordinates" : [[13.0, 53.0], [14.0, 52.0]] + }, + "relation": "within" + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "must": { + "match_all": {} + }, + "filter": { + "geo_shape": { + "location": { + "indexed_shape": { + "id": "DEU", + "type": "countries", + "index": "shapes", + "path": "location" + } + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "function_score": { + "query": {}, + "boost": "5", + "random_score": {}, + "boost_mode":"multiply" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "function_score": { + "query": {}, + "boost": "5", + "functions": [ + { + "filter": {}, + "random_score": {}, + "weight": 23 + }, + { + "filter": {}, + "weight": 42 + } + ], + "max_boost": 42, + "score_mode": "max", + "boost_mode": "multiply", + "min_score" : 42 + } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "function_score": { + "functions": [ + { + "gauss": { + "price": { + "origin": "0", + "scale": "20" + } + } + }, + { + "gauss": { + "location": { + "origin": "11, 12", + "scale": "2km" + } + } + } + ], + "query": { + "match": { + "properties": "balcony" + } + }, + "score_mode": "multiply" + } + } +} +handle snippet 
+test snippet +emit snippet +body part: +{ + "query": { + "span_or" : { + "clauses" : [ + { "span_term" : { "field" : "value1" } }, + { "span_term" : { "field" : "value2" } }, + { "span_term" : { "field" : "value3" } } + ] + } + } +} +handle snippet +null +emit snippet +body part: +{ + "mappings": { + "location": { + "properties": { + "pin": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } + } + } +} +emit snippet +body part: +{ + "pin" : { + "location" : { + "lat" : 40.12, + "lon" : -71.34 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_bounding_box" : { + "pin.location" : { + "top_left" : { + "lat" : 40.73, + "lon" : -74.1 + }, + "bottom_right" : { + "lat" : 40.01, + "lon" : -71.12 + } + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_bounding_box" : { + "pin.location" : { + "top_left" : { + "lat" : 40.73, + "lon" : -74.1 + }, + "bottom_right" : { + "lat" : 40.01, + "lon" : -71.12 + } + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_bounding_box" : { + "pin.location" : { + "top_left" : [-74.1, 40.73], + "bottom_right" : [-71.12, 40.01] + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_bounding_box" : { + "pin.location" : { + "top_left" : "40.73, -74.1", + "bottom_right" : "40.01, -71.12" + } + } + } + } +} +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_bounding_box" : { + "pin.location" : { + "top_left" : "dr5r9ydj2y73", + "bottom_right" : "drj7teegpus6" + } + } + } + } + } +} +handle 
snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_bounding_box" : { + "pin.location" : { + "top" : 40.73, + "left" : -74.1, + "bottom" : 40.01, + "right" : -71.12 + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool" : { + "must" : { + "match_all" : {} + }, + "filter" : { + "geo_bounding_box" : { + "pin.location" : { + "top_left" : { + "lat" : 40.73, + "lon" : -74.1 + }, + "bottom_right" : { + "lat" : 40.10, + "lon" : -71.12 + } + }, + "type" : "indexed" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match_phrase_prefix" : { + "message" : "quick brown f" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match_phrase_prefix" : { + "message" : { + "query" : "quick brown f", + "max_expansions" : 10 + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "doctype": { + "properties": { + "message": { + "type": "string" + } + } + }, + "queries": { + "properties": { + "query": { + "type": "percolator" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match" : { + "message" : "bonsai tree" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "percolate" : { + "field" : "query", + "document_type" : "doctype", + "document" : { + "message" : "A new bonsai tree in the office" + } + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "message" : "A new bonsai tree in the office" +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "percolate" : { + "field": "query", + "document_type" : "doctype", + "index" : "my-index", + "type" : "message", + "id" : "1", + "version" : 1 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match" : { + 
"message" : "brown fox" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "match" : { + "message" : "lazy dog" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query" : { + "percolate" : { + "field": "query", + "document_type" : "doctype", + "document" : { + "message" : "The quick brown fox jumps over the lazy dog" + } + } + }, + "highlight": { + "fields": { + "message": {} + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "term" : { + "query.unknown_query" : "" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "span_not" : { + "include" : { + "span_term" : { "field1" : "hoya" } + }, + "exclude" : { + "span_near" : { + "clauses" : [ + { "span_term" : { "field1" : "la" } }, + { "span_term" : { "field1" : "hoya" } } + ], + "slop" : 0, + "in_order" : true + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "type" : { + "value" : "my_type" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "span_term" : { "user" : "kimchy" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "span_term" : { "user" : { "value" : "kimchy", "boost" : 2.0 } } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "span_term" : { "user" : { "term" : "kimchy", "boost" : 2.0 } } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "fuzzy" : { "user" : "ki" } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "fuzzy" : { + "user" : { + "value" : "ki", + "boost" : 1.0, + "fuzziness" : 2, + "prefix_length" : 0, + "max_expansions": 100 + } + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "description" : "describe pipeline", + "processors" : [ + { + "set" : { + "field": "foo", + "value": "bar" + } + } + // other processors + ] +} 
+handle snippet +test snippet +emit snippet +handle snippet +handle snippet +test snippet +emit snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "pipeline" : + { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value" : "_value" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_type": "type", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index", + "_type": "type", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ] +} +handle snippet +handle snippet +test snippet +emit snippet +query part: verbose +body part: +{ + "pipeline" : + { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value" : "_value2" + } + }, + { + "set" : { + "field" : "field3", + "value" : "_value3" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_type": "type", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "index", + "_type": "type", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ] +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet 
+handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +emit snippet +query part: nodes=nodeId1,nodeId2 +emit snippet +query part: nodes=nodeId1,nodeId2&actions=cluster:* +handle snippet +handle snippet +test snippet +emit snippet +emit snippet +query part: parent_task_id=parentTaskId:1 +handle snippet +test snippet +emit snippet +query part: wait_for_completion=true&timeout=10s +handle snippet +test snippet +emit snippet +handle snippet +test snippet +emit snippet +handle snippet +test snippet +emit snippet +query part: node_id=nodeId1,nodeId2&actions=*reindex +handle snippet +test snippet +emit snippet +query part: group_by=parents +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "transient" : { + "cluster.routing.allocation.exclude._ip" : "10.0.0.1" + } +} +handle 
snippet +test snippet +emit snippet +body part: +{ + "transient": { + "cluster.routing.allocation.include._ip": "192.168.2.*" + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "transient": { + "cluster.routing.allocation.disk.watermark.low": "80%", + "cluster.routing.allocation.disk.watermark.high": "50gb", + "cluster.info.update.interval": "1m" + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +null +emit snippet +query part: refresh +body part: +{"index":{"_id":1}} +{"first":"johnny","last":"gaudreau","goals":[9,27,1],"assists":[17,46,0],"gp":[26,82,1]} +{"index":{"_id":2}} +{"first":"sean","last":"monohan","goals":[7,54,26],"assists":[11,26,13],"gp":[26,82,82]} +{"index":{"_id":3}} +{"first":"jiri","last":"hudler","goals":[5,34,36],"assists":[11,62,42],"gp":[24,80,79]} +{"index":{"_id":4}} +{"first":"micheal","last":"frolik","goals":[4,6,15],"assists":[8,23,15],"gp":[26,82,82]} +{"index":{"_id":5}} +{"first":"sam","last":"bennett","goals":[5,0,0],"assists":[8,1,0],"gp":[26,1,0]} +{"index":{"_id":6}} +{"first":"dennis","last":"wideman","goals":[0,26,15],"assists":[11,30,24],"gp":[26,81,82]} +{"index":{"_id":7}} +{"first":"david","last":"jones","goals":[7,19,5],"assists":[3,17,4],"gp":[26,45,34]} +{"index":{"_id":8}} +{"first":"tj","last":"brodie","goals":[2,14,7],"assists":[8,42,30],"gp":[26,82,82]} +{"index":{"_id":39}} +{"first":"mark","last":"giordano","goals":[6,30,15],"assists":[3,30,24],"gp":[26,60,63]} +{"index":{"_id":10}} +{"first":"mikael","last":"backlund","goals":[3,15,13],"assists":[6,24,18],"gp":[26,82,82]} +{"index":{"_id":11}} 
+{"first":"joe","last":"colborne","goals":[3,18,13],"assists":[6,20,24],"gp":[26,67,82]} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "function_score": { + "script_score": { + "script": { + "lang": "painless", + "inline": "int total = 0; for (int i = 0; i < doc['goals'].length; ++i) { total += doc['goals'][i]; } return total;" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match_all": {} + }, + "script_fields": { + "total_goals": { + "script": { + "lang": "painless", + "inline": "int total = 0; for (int i = 0; i < doc['goals'].length; ++i) { total += doc['goals'][i]; } return total;" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match_all": {} + }, + "sort": { + "_script": { + "type": "string", + "order": "asc", + "script": { + "lang": "painless", + "inline": "doc['first'].value + ' ' + doc['last'].value" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "fields": [ + "_id", + "_source" + ], + "query": { + "term": { + "_id": 1 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "script": { + "lang": "painless", + "inline": "ctx._source.last = params.last", + "params": { + "last": "hockey" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "script": { + "lang": "painless", + "inline": "ctx._source.last = params.last; ctx._source.nick = params.nick", + "params": { + "last": "gaudreau", + "nick": "hockey" + } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle 
snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "text": "quick brown fox", + "popularity": 1 +} +emit snippet +body part: +{ + "text": "quick fox", + "popularity": 5 +} +emit snippet +body part: +{ + "query": { + "function_score": { + "query": { + "match": { + "text": "quick brown fox" + } + }, + "script_score": { + "script": { + "lang": "expression", + "inline": "_score * doc['popularity']" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "cost_price": 100 +} +emit snippet +body part: +{ + "script_fields": { + "sales_price": { + "script": { + "lang": "expression", + "inline": "doc['cost_price'] * markup", + "params": { + "markup": 0.2 + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "title": { + "type": "text" + }, + "first_name": { + "type": "text", + "store": true + }, + "last_name": { + "type": "text", + "store": true + } + } + } + } +} +emit snippet +body part: +{ + "title": "Mr", + "first_name": "Barry", + "last_name": "White" +} +emit snippet +body part: +{ + "script_fields": { + "source": { + "script": { + "lang": "groovy", + "inline": "_source.title + ' ' + _source.first_name + ' ' + _source.last_name" + } + }, + "stored_fields": { + "script": { + "lang": "groovy", + "inline": "_fields['first_name'].value + ' ' + _fields['last_name'].value" + } + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "my_field": 5 +} +emit snippet +body part: +{ + "script_fields": { + "my_doubled_field": { + "script": { + "lang": "expression", + "inline": "doc['my_field'] * multiplier", + "params": { + "multiplier": 2 + } + } + } + } +} +handle snippet +handle snippet +handle snippet +handle 
snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "script": "log(_score * 2) + my_modifier" +} +handle snippet +test snippet +emit snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "script": { + "script": { + "lang": "groovy", + "id": "calculate-score", + "params": { + "my_modifier": 2 + } + } + } + } +} +handle snippet +test snippet +emit snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +catch part: /cannot set discovery.zen.minimum_master_nodes to more than the current master nodes/ +body part: +{ + "transient": { + "discovery.zen.minimum_master_nodes": 2 + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} + {"index":{"_id": "5"}} + {"user": "test", "message": "some message with the number 5", "date": "2009-11-15T14:12:12", "likes": 5} + {"index":{"_id": "6"}} + {"user": "test", "message": "some message with the number 6", "date": "2009-11-15T14:12:12", "likes": 6} + 
{"index":{"_id": "7"}} + {"user": "test", "message": "some message with the number 7", "date": "2009-11-15T14:12:12", "likes": 7} + {"index":{"_id": "8"}} + {"user": "test", "message": "some message with the number 8", "date": "2009-11-15T14:12:12", "likes": 8} + {"index":{"_id": "9"}} + {"user": "test", "message": "some message with the number 9", "date": "2009-11-15T14:12:12", "likes": 9} + {"index":{"_id": "10"}} + {"user": "test", "message": "some message with the number 10", "date": "2009-11-15T14:12:12", "likes": 10} + {"index":{"_id": "11"}} + {"user": "test", "message": "some message with the number 11", "date": "2009-11-15T14:12:12", "likes": 11} + {"index":{"_id": "12"}} + {"user": "test", "message": "some message with the number 12", "date": "2009-11-15T14:12:12", "likes": 12} + {"index":{"_id": "13"}} + {"user": "test", "message": "some message with the number 13", "date": "2009-11-15T14:12:12", "likes": 13} + {"index":{"_id": "14"}} + {"user": "test", "message": "some message with the number 14", "date": "2009-11-15T14:12:12", "likes": 14} + {"index":{"_id": "15"}} + {"user": "test", "message": "some message with the number 15", "date": "2009-11-15T14:12:12", "likes": 15} + {"index":{"_id": "16"}} + {"user": "test", "message": "some message with the number 16", "date": "2009-11-15T14:12:12", "likes": 16} + {"index":{"_id": "17"}} + {"user": "test", "message": "some message with the number 17", "date": "2009-11-15T14:12:12", "likes": 17} + {"index":{"_id": "18"}} + {"user": "test", "message": "some message with the number 18", "date": "2009-11-15T14:12:12", "likes": 18} + {"index":{"_id": "19"}} + {"user": "test", "message": "some message with the number 19", "date": "2009-11-15T14:12:12", "likes": 19} + {"index":{"_id": "20"}} + {"user": "test", "message": "some message with the number 20", "date": "2009-11-15T14:12:12", "likes": 20} + {"index":{"_id": "21"}} + {"user": "test", "message": "some message with the number 21", "date": 
"2009-11-15T14:12:12", "likes": 21} + {"index":{"_id": "22"}} + {"user": "test", "message": "some message with the number 22", "date": "2009-11-15T14:12:12", "likes": 22} + {"index":{"_id": "23"}} + {"user": "test", "message": "some message with the number 23", "date": "2009-11-15T14:12:12", "likes": 23} + {"index":{"_id": "24"}} + {"user": "test", "message": "some message with the number 24", "date": "2009-11-15T14:12:12", "likes": 24} + {"index":{"_id": "25"}} + {"user": "test", "message": "some message with the number 25", "date": "2009-11-15T14:12:12", "likes": 25} + {"index":{"_id": "26"}} + {"user": "test", "message": "some message with the number 26", "date": "2009-11-15T14:12:12", "likes": 26} + {"index":{"_id": "27"}} + {"user": "test", "message": "some message with the number 27", "date": "2009-11-15T14:12:12", "likes": 27} + {"index":{"_id": "28"}} + {"user": "test", "message": "some message with the number 28", "date": "2009-11-15T14:12:12", "likes": 28} + {"index":{"_id": "29"}} + {"user": "test", "message": "some message with the number 29", "date": "2009-11-15T14:12:12", "likes": 29} + {"index":{"_id": "30"}} + {"user": "test", "message": "some message with the number 30", "date": "2009-11-15T14:12:12", "likes": 30} + {"index":{"_id": "31"}} + {"user": "test", "message": "some message with the number 31", "date": "2009-11-15T14:12:12", "likes": 31} + {"index":{"_id": "32"}} + {"user": "test", "message": "some message with the number 32", "date": "2009-11-15T14:12:12", "likes": 32} + {"index":{"_id": "33"}} + {"user": "test", "message": "some message with the number 33", "date": "2009-11-15T14:12:12", "likes": 33} + {"index":{"_id": "34"}} + {"user": "test", "message": "some message with the number 34", "date": "2009-11-15T14:12:12", "likes": 34} + {"index":{"_id": "35"}} + {"user": "test", "message": "some message with the number 35", "date": "2009-11-15T14:12:12", "likes": 35} + {"index":{"_id": "36"}} + {"user": "test", "message": "some message 
with the number 36", "date": "2009-11-15T14:12:12", "likes": 36} + {"index":{"_id": "37"}} + {"user": "test", "message": "some message with the number 37", "date": "2009-11-15T14:12:12", "likes": 37} + {"index":{"_id": "38"}} + {"user": "test", "message": "some message with the number 38", "date": "2009-11-15T14:12:12", "likes": 38} + {"index":{"_id": "39"}} + {"user": "test", "message": "some message with the number 39", "date": "2009-11-15T14:12:12", "likes": 39} + {"index":{"_id": "40"}} + {"user": "test", "message": "some message with the number 40", "date": "2009-11-15T14:12:12", "likes": 40} + {"index":{"_id": "41"}} + {"user": "test", "message": "some message with the number 41", "date": "2009-11-15T14:12:12", "likes": 41} + {"index":{"_id": "42"}} + {"user": "test", "message": "some message with the number 42", "date": "2009-11-15T14:12:12", "likes": 42} + {"index":{"_id": "43"}} + {"user": "test", "message": "some message with the number 43", "date": "2009-11-15T14:12:12", "likes": 43} + {"index":{"_id": "44"}} + {"user": "test", "message": "some message with the number 44", "date": "2009-11-15T14:12:12", "likes": 44} + {"index":{"_id": "45"}} + {"user": "test", "message": "some message with the number 45", "date": "2009-11-15T14:12:12", "likes": 45} + {"index":{"_id": "46"}} + {"user": "test", "message": "some message with the number 46", "date": "2009-11-15T14:12:12", "likes": 46} + {"index":{"_id": "47"}} + {"user": "test", "message": "some message with the number 47", "date": "2009-11-15T14:12:12", "likes": 47} + {"index":{"_id": "48"}} + {"user": "test", "message": "some message with the number 48", "date": "2009-11-15T14:12:12", "likes": 48} + {"index":{"_id": "49"}} + {"user": "test", "message": "some message with the number 49", "date": "2009-11-15T14:12:12", "likes": 49} + {"index":{"_id": "50"}} + {"user": "test", "message": "some message with the number 50", "date": "2009-11-15T14:12:12", "likes": 50} + {"index":{"_id": "51"}} + {"user": "test", 
"message": "some message with the number 51", "date": "2009-11-15T14:12:12", "likes": 51} + {"index":{"_id": "52"}} + {"user": "test", "message": "some message with the number 52", "date": "2009-11-15T14:12:12", "likes": 52} + {"index":{"_id": "53"}} + {"user": "test", "message": "some message with the number 53", "date": "2009-11-15T14:12:12", "likes": 53} + {"index":{"_id": "54"}} + {"user": "test", "message": "some message with the number 54", "date": "2009-11-15T14:12:12", "likes": 54} + {"index":{"_id": "55"}} + {"user": "test", "message": "some message with the number 55", "date": "2009-11-15T14:12:12", "likes": 55} + {"index":{"_id": "56"}} + {"user": "test", "message": "some message with the number 56", "date": "2009-11-15T14:12:12", "likes": 56} + {"index":{"_id": "57"}} + {"user": "test", "message": "some message with the number 57", "date": "2009-11-15T14:12:12", "likes": 57} + {"index":{"_id": "58"}} + {"user": "test", "message": "some message with the number 58", "date": "2009-11-15T14:12:12", "likes": 58} + {"index":{"_id": "59"}} + {"user": "test", "message": "some message with the number 59", "date": "2009-11-15T14:12:12", "likes": 59} + {"index":{"_id": "60"}} + {"user": "test", "message": "some message with the number 60", "date": "2009-11-15T14:12:12", "likes": 60} + {"index":{"_id": "61"}} + {"user": "test", "message": "some message with the number 61", "date": "2009-11-15T14:12:12", "likes": 61} + {"index":{"_id": "62"}} + {"user": "test", "message": "some message with the number 62", "date": "2009-11-15T14:12:12", "likes": 62} + {"index":{"_id": "63"}} + {"user": "test", "message": "some message with the number 63", "date": "2009-11-15T14:12:12", "likes": 63} + {"index":{"_id": "64"}} + {"user": "test", "message": "some message with the number 64", "date": "2009-11-15T14:12:12", "likes": 64} + {"index":{"_id": "65"}} + {"user": "test", "message": "some message with the number 65", "date": "2009-11-15T14:12:12", "likes": 65} + {"index":{"_id": 
"66"}} + {"user": "test", "message": "some message with the number 66", "date": "2009-11-15T14:12:12", "likes": 66} + {"index":{"_id": "67"}} + {"user": "test", "message": "some message with the number 67", "date": "2009-11-15T14:12:12", "likes": 67} + {"index":{"_id": "68"}} + {"user": "test", "message": "some message with the number 68", "date": "2009-11-15T14:12:12", "likes": 68} + {"index":{"_id": "69"}} + {"user": "test", "message": "some message with the number 69", "date": "2009-11-15T14:12:12", "likes": 69} + {"index":{"_id": "70"}} + {"user": "test", "message": "some message with the number 70", "date": "2009-11-15T14:12:12", "likes": 70} + {"index":{"_id": "71"}} + {"user": "test", "message": "some message with the number 71", "date": "2009-11-15T14:12:12", "likes": 71} + {"index":{"_id": "72"}} + {"user": "test", "message": "some message with the number 72", "date": "2009-11-15T14:12:12", "likes": 72} + {"index":{"_id": "73"}} + {"user": "test", "message": "some message with the number 73", "date": "2009-11-15T14:12:12", "likes": 73} + {"index":{"_id": "74"}} + {"user": "test", "message": "some message with the number 74", "date": "2009-11-15T14:12:12", "likes": 74} + {"index":{"_id": "75"}} + {"user": "test", "message": "some message with the number 75", "date": "2009-11-15T14:12:12", "likes": 75} + {"index":{"_id": "76"}} + {"user": "test", "message": "some message with the number 76", "date": "2009-11-15T14:12:12", "likes": 76} + {"index":{"_id": "77"}} + {"user": "test", "message": "some message with the number 77", "date": "2009-11-15T14:12:12", "likes": 77} + {"index":{"_id": "78"}} + {"user": "test", "message": "some message with the number 78", "date": "2009-11-15T14:12:12", "likes": 78} + {"index":{"_id": "79"}} + {"user": "test", "message": "some message with the number 79", "date": "2009-11-15T14:12:12", "likes": 79} + {"index":{"_id": "80"}} + {"user": "test", "message": "some message with the number 80", "date": "2009-11-15T14:12:12", 
"likes": 80} + {"index":{"_id": "81"}} + {"user": "test", "message": "some message with the number 81", "date": "2009-11-15T14:12:12", "likes": 81} + {"index":{"_id": "82"}} + {"user": "test", "message": "some message with the number 82", "date": "2009-11-15T14:12:12", "likes": 82} + {"index":{"_id": "83"}} + {"user": "test", "message": "some message with the number 83", "date": "2009-11-15T14:12:12", "likes": 83} + {"index":{"_id": "84"}} + {"user": "test", "message": "some message with the number 84", "date": "2009-11-15T14:12:12", "likes": 84} + {"index":{"_id": "85"}} + {"user": "test", "message": "some message with the number 85", "date": "2009-11-15T14:12:12", "likes": 85} + {"index":{"_id": "86"}} + {"user": "test", "message": "some message with the number 86", "date": "2009-11-15T14:12:12", "likes": 86} + {"index":{"_id": "87"}} + {"user": "test", "message": "some message with the number 87", "date": "2009-11-15T14:12:12", "likes": 87} + {"index":{"_id": "88"}} + {"user": "test", "message": "some message with the number 88", "date": "2009-11-15T14:12:12", "likes": 88} + {"index":{"_id": "89"}} + {"user": "test", "message": "some message with the number 89", "date": "2009-11-15T14:12:12", "likes": 89} + {"index":{"_id": "90"}} + {"user": "test", "message": "some message with the number 90", "date": "2009-11-15T14:12:12", "likes": 90} + {"index":{"_id": "91"}} + {"user": "test", "message": "some message with the number 91", "date": "2009-11-15T14:12:12", "likes": 91} + {"index":{"_id": "92"}} + {"user": "test", "message": "some message with the number 92", "date": "2009-11-15T14:12:12", "likes": 92} + {"index":{"_id": "93"}} + {"user": "test", "message": "some message with the number 93", "date": "2009-11-15T14:12:12", "likes": 93} + {"index":{"_id": "94"}} + {"user": "test", "message": "some message with the number 94", "date": "2009-11-15T14:12:12", "likes": 94} + {"index":{"_id": "95"}} + {"user": "test", "message": "some message with the number 95", 
"date": "2009-11-15T14:12:12", "likes": 95} + {"index":{"_id": "96"}} + {"user": "test", "message": "some message with the number 96", "date": "2009-11-15T14:12:12", "likes": 96} + {"index":{"_id": "97"}} + {"user": "test", "message": "some message with the number 97", "date": "2009-11-15T14:12:12", "likes": 97} + {"index":{"_id": "98"}} + {"user": "test", "message": "some message with the number 98", "date": "2009-11-15T14:12:12", "likes": 98} + {"index":{"_id": "99"}} + {"user": "test", "message": "some message with the number 99", "date": "2009-11-15T14:12:12", "likes": 99} + {"index":{"_id": "100"}} + {"user": "test", "message": "some message with the number 100", "date": "2009-11-15T14:12:12", "likes": 100} + {"index":{"_id": "101"}} + {"user": "test", "message": "some message with the number 101", "date": "2009-11-15T14:12:12", "likes": 101} + {"index":{"_id": "102"}} + {"user": "test", "message": "some message with the number 102", "date": "2009-11-15T14:12:12", "likes": 102} + {"index":{"_id": "103"}} + {"user": "test", "message": "some message with the number 103", "date": "2009-11-15T14:12:12", "likes": 103} + {"index":{"_id": "104"}} + {"user": "test", "message": "some message with the number 104", "date": "2009-11-15T14:12:12", "likes": 104} + {"index":{"_id": "105"}} + {"user": "test", "message": "some message with the number 105", "date": "2009-11-15T14:12:12", "likes": 105} + {"index":{"_id": "106"}} + {"user": "test", "message": "some message with the number 106", "date": "2009-11-15T14:12:12", "likes": 106} + {"index":{"_id": "107"}} + {"user": "test", "message": "some message with the number 107", "date": "2009-11-15T14:12:12", "likes": 107} + {"index":{"_id": "108"}} + {"user": "test", "message": "some message with the number 108", "date": "2009-11-15T14:12:12", "likes": 108} + {"index":{"_id": "109"}} + {"user": "test", "message": "some message with the number 109", "date": "2009-11-15T14:12:12", "likes": 109} + {"index":{"_id": "110"}} + 
{"user": "test", "message": "some message with the number 110", "date": "2009-11-15T14:12:12", "likes": 110} + {"index":{"_id": "111"}} + {"user": "test", "message": "some message with the number 111", "date": "2009-11-15T14:12:12", "likes": 111} + {"index":{"_id": "112"}} + {"user": "test", "message": "some message with the number 112", "date": "2009-11-15T14:12:12", "likes": 112} + {"index":{"_id": "113"}} + {"user": "test", "message": "some message with the number 113", "date": "2009-11-15T14:12:12", "likes": 113} + {"index":{"_id": "114"}} + {"user": "test", "message": "some message with the number 114", "date": "2009-11-15T14:12:12", "likes": 114} + {"index":{"_id": "115"}} + {"user": "test", "message": "some message with the number 115", "date": "2009-11-15T14:12:12", "likes": 115} + {"index":{"_id": "116"}} + {"user": "test", "message": "some message with the number 116", "date": "2009-11-15T14:12:12", "likes": 116} + {"index":{"_id": "117"}} + {"user": "test", "message": "some message with the number 117", "date": "2009-11-15T14:12:12", "likes": 117} + {"index":{"_id": "118"}} + {"user": "test", "message": "some message with the number 118", "date": "2009-11-15T14:12:12", "likes": 118} + {"index":{"_id": "119"}} + {"user": "test", "message": "some message with the number 119", "date": "2009-11-15T14:12:12", "likes": 119} +emit snippet +body part: +{ + "query": { + "match": { + "message": "some message" + } + } +} +handle snippet +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": 
"test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +query part: conflicts=proceed +body part: +{ + "query": { + "match_all": {} + } +} +handle snippet +test snippet +emit snippet +emit snippet +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "query": { + "match_all": {} + } +} +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +query part: routing=1 +body part: +{ + "query": { + "range" : { + "age" : { + "gte" : 10 + } + } + } +} +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", 
"likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +query part: scroll_size=5000 +body part: +{ + "query": { + "term": { + "user": "kimchy" + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +query part: detailed=true&action=*/delete/byquery +handle snippet +handle snippet +test snippet +emit snippet +handle snippet +test snippet +emit snippet +query part: requests_per_second=unlimited +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "user" : "kimchy", + "post_date" : "2009-11-15T14:12:12", + "message" : "trying out Elasticsearch" +} +handle snippet +handle snippet +test snippet +emit snippet +catch part: conflict +query part: version=2 +body part: +{ + "message" : "elasticsearch now has versioning support, double cool!" 
+} +handle snippet +test snippet +emit snippet +query part: op_type=create +body part: +{ + "user" : "kimchy", + "post_date" : "2009-11-15T14:12:12", + "message" : "trying out Elasticsearch" +} +handle snippet +test snippet +emit snippet +body part: +{ + "user" : "kimchy", + "post_date" : "2009-11-15T14:12:12", + "message" : "trying out Elasticsearch" +} +handle snippet +test snippet +emit snippet +body part: +{ + "user" : "kimchy", + "post_date" : "2009-11-15T14:12:12", + "message" : "trying out Elasticsearch" +} +handle snippet +handle snippet +test snippet +emit snippet +query part: routing=kimchy +body part: +{ + "user" : "kimchy", + "post_date" : "2009-11-15T14:12:12", + "message" : "trying out Elasticsearch" +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "tag_parent": {}, + "blog_tag": { + "_parent": { + "type": "tag_parent" + } + } + } +} +emit snippet +query part: parent=1111 +body part: +{ + "tag" : "something" +} +handle snippet +test snippet +emit snippet +query part: timestamp=2009-11-15T14:12:12 +body part: +{ + "user" : "kimchy", + "message" : "trying out Elasticsearch" +} +handle snippet +test snippet +emit snippet +query part: ttl=86400000ms +body part: +{ + "user": "kimchy", + "message": "Trying out elasticsearch, so far so good?" +} +handle snippet +test snippet +emit snippet +query part: ttl=1d +body part: +{ + "user": "kimchy", + "message": "Trying out elasticsearch, so far so good?" 
+} +handle snippet +test snippet +emit snippet +query part: timeout=5m +body part: +{ + "user" : "kimchy", + "post_date" : "2009-11-15T14:12:12", + "message" : "trying out Elasticsearch" +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} + {"index":{"_id": "5"}} + {"user": "test", "message": "some message with the number 5", "date": "2009-11-15T14:12:12", "likes": 5} + {"index":{"_id": "6"}} + {"user": "test", "message": "some message with the number 6", "date": "2009-11-15T14:12:12", "likes": 6} + {"index":{"_id": "7"}} + {"user": "test", "message": "some message with the number 7", "date": "2009-11-15T14:12:12", "likes": 7} + {"index":{"_id": "8"}} + {"user": "test", "message": "some message with the number 8", "date": "2009-11-15T14:12:12", "likes": 8} + {"index":{"_id": "9"}} + {"user": "test", "message": "some message with the number 9", "date": "2009-11-15T14:12:12", "likes": 9} + {"index":{"_id": "10"}} + {"user": "test", "message": "some message with the number 10", "date": "2009-11-15T14:12:12", "likes": 10} + {"index":{"_id": "11"}} + {"user": "test", "message": "some 
message with the number 11", "date": "2009-11-15T14:12:12", "likes": 11} + {"index":{"_id": "12"}} + {"user": "test", "message": "some message with the number 12", "date": "2009-11-15T14:12:12", "likes": 12} + {"index":{"_id": "13"}} + {"user": "test", "message": "some message with the number 13", "date": "2009-11-15T14:12:12", "likes": 13} + {"index":{"_id": "14"}} + {"user": "test", "message": "some message with the number 14", "date": "2009-11-15T14:12:12", "likes": 14} + {"index":{"_id": "15"}} + {"user": "test", "message": "some message with the number 15", "date": "2009-11-15T14:12:12", "likes": 15} + {"index":{"_id": "16"}} + {"user": "test", "message": "some message with the number 16", "date": "2009-11-15T14:12:12", "likes": 16} + {"index":{"_id": "17"}} + {"user": "test", "message": "some message with the number 17", "date": "2009-11-15T14:12:12", "likes": 17} + {"index":{"_id": "18"}} + {"user": "test", "message": "some message with the number 18", "date": "2009-11-15T14:12:12", "likes": 18} + {"index":{"_id": "19"}} + {"user": "test", "message": "some message with the number 19", "date": "2009-11-15T14:12:12", "likes": 19} + {"index":{"_id": "20"}} + {"user": "test", "message": "some message with the number 20", "date": "2009-11-15T14:12:12", "likes": 20} + {"index":{"_id": "21"}} + {"user": "test", "message": "some message with the number 21", "date": "2009-11-15T14:12:12", "likes": 21} + {"index":{"_id": "22"}} + {"user": "test", "message": "some message with the number 22", "date": "2009-11-15T14:12:12", "likes": 22} + {"index":{"_id": "23"}} + {"user": "test", "message": "some message with the number 23", "date": "2009-11-15T14:12:12", "likes": 23} + {"index":{"_id": "24"}} + {"user": "test", "message": "some message with the number 24", "date": "2009-11-15T14:12:12", "likes": 24} + {"index":{"_id": "25"}} + {"user": "test", "message": "some message with the number 25", "date": "2009-11-15T14:12:12", "likes": 25} + {"index":{"_id": "26"}} + {"user": 
"test", "message": "some message with the number 26", "date": "2009-11-15T14:12:12", "likes": 26} + {"index":{"_id": "27"}} + {"user": "test", "message": "some message with the number 27", "date": "2009-11-15T14:12:12", "likes": 27} + {"index":{"_id": "28"}} + {"user": "test", "message": "some message with the number 28", "date": "2009-11-15T14:12:12", "likes": 28} + {"index":{"_id": "29"}} + {"user": "test", "message": "some message with the number 29", "date": "2009-11-15T14:12:12", "likes": 29} + {"index":{"_id": "30"}} + {"user": "test", "message": "some message with the number 30", "date": "2009-11-15T14:12:12", "likes": 30} + {"index":{"_id": "31"}} + {"user": "test", "message": "some message with the number 31", "date": "2009-11-15T14:12:12", "likes": 31} + {"index":{"_id": "32"}} + {"user": "test", "message": "some message with the number 32", "date": "2009-11-15T14:12:12", "likes": 32} + {"index":{"_id": "33"}} + {"user": "test", "message": "some message with the number 33", "date": "2009-11-15T14:12:12", "likes": 33} + {"index":{"_id": "34"}} + {"user": "test", "message": "some message with the number 34", "date": "2009-11-15T14:12:12", "likes": 34} + {"index":{"_id": "35"}} + {"user": "test", "message": "some message with the number 35", "date": "2009-11-15T14:12:12", "likes": 35} + {"index":{"_id": "36"}} + {"user": "test", "message": "some message with the number 36", "date": "2009-11-15T14:12:12", "likes": 36} + {"index":{"_id": "37"}} + {"user": "test", "message": "some message with the number 37", "date": "2009-11-15T14:12:12", "likes": 37} + {"index":{"_id": "38"}} + {"user": "test", "message": "some message with the number 38", "date": "2009-11-15T14:12:12", "likes": 38} + {"index":{"_id": "39"}} + {"user": "test", "message": "some message with the number 39", "date": "2009-11-15T14:12:12", "likes": 39} + {"index":{"_id": "40"}} + {"user": "test", "message": "some message with the number 40", "date": "2009-11-15T14:12:12", "likes": 40} + 
{"index":{"_id": "41"}} + {"user": "test", "message": "some message with the number 41", "date": "2009-11-15T14:12:12", "likes": 41} + {"index":{"_id": "42"}} + {"user": "test", "message": "some message with the number 42", "date": "2009-11-15T14:12:12", "likes": 42} + {"index":{"_id": "43"}} + {"user": "test", "message": "some message with the number 43", "date": "2009-11-15T14:12:12", "likes": 43} + {"index":{"_id": "44"}} + {"user": "test", "message": "some message with the number 44", "date": "2009-11-15T14:12:12", "likes": 44} + {"index":{"_id": "45"}} + {"user": "test", "message": "some message with the number 45", "date": "2009-11-15T14:12:12", "likes": 45} + {"index":{"_id": "46"}} + {"user": "test", "message": "some message with the number 46", "date": "2009-11-15T14:12:12", "likes": 46} + {"index":{"_id": "47"}} + {"user": "test", "message": "some message with the number 47", "date": "2009-11-15T14:12:12", "likes": 47} + {"index":{"_id": "48"}} + {"user": "test", "message": "some message with the number 48", "date": "2009-11-15T14:12:12", "likes": 48} + {"index":{"_id": "49"}} + {"user": "test", "message": "some message with the number 49", "date": "2009-11-15T14:12:12", "likes": 49} + {"index":{"_id": "50"}} + {"user": "test", "message": "some message with the number 50", "date": "2009-11-15T14:12:12", "likes": 50} + {"index":{"_id": "51"}} + {"user": "test", "message": "some message with the number 51", "date": "2009-11-15T14:12:12", "likes": 51} + {"index":{"_id": "52"}} + {"user": "test", "message": "some message with the number 52", "date": "2009-11-15T14:12:12", "likes": 52} + {"index":{"_id": "53"}} + {"user": "test", "message": "some message with the number 53", "date": "2009-11-15T14:12:12", "likes": 53} + {"index":{"_id": "54"}} + {"user": "test", "message": "some message with the number 54", "date": "2009-11-15T14:12:12", "likes": 54} + {"index":{"_id": "55"}} + {"user": "test", "message": "some message with the number 55", "date": 
"2009-11-15T14:12:12", "likes": 55} + {"index":{"_id": "56"}} + {"user": "test", "message": "some message with the number 56", "date": "2009-11-15T14:12:12", "likes": 56} + {"index":{"_id": "57"}} + {"user": "test", "message": "some message with the number 57", "date": "2009-11-15T14:12:12", "likes": 57} + {"index":{"_id": "58"}} + {"user": "test", "message": "some message with the number 58", "date": "2009-11-15T14:12:12", "likes": 58} + {"index":{"_id": "59"}} + {"user": "test", "message": "some message with the number 59", "date": "2009-11-15T14:12:12", "likes": 59} + {"index":{"_id": "60"}} + {"user": "test", "message": "some message with the number 60", "date": "2009-11-15T14:12:12", "likes": 60} + {"index":{"_id": "61"}} + {"user": "test", "message": "some message with the number 61", "date": "2009-11-15T14:12:12", "likes": 61} + {"index":{"_id": "62"}} + {"user": "test", "message": "some message with the number 62", "date": "2009-11-15T14:12:12", "likes": 62} + {"index":{"_id": "63"}} + {"user": "test", "message": "some message with the number 63", "date": "2009-11-15T14:12:12", "likes": 63} + {"index":{"_id": "64"}} + {"user": "test", "message": "some message with the number 64", "date": "2009-11-15T14:12:12", "likes": 64} + {"index":{"_id": "65"}} + {"user": "test", "message": "some message with the number 65", "date": "2009-11-15T14:12:12", "likes": 65} + {"index":{"_id": "66"}} + {"user": "test", "message": "some message with the number 66", "date": "2009-11-15T14:12:12", "likes": 66} + {"index":{"_id": "67"}} + {"user": "test", "message": "some message with the number 67", "date": "2009-11-15T14:12:12", "likes": 67} + {"index":{"_id": "68"}} + {"user": "test", "message": "some message with the number 68", "date": "2009-11-15T14:12:12", "likes": 68} + {"index":{"_id": "69"}} + {"user": "test", "message": "some message with the number 69", "date": "2009-11-15T14:12:12", "likes": 69} + {"index":{"_id": "70"}} + {"user": "test", "message": "some message 
with the number 70", "date": "2009-11-15T14:12:12", "likes": 70} + {"index":{"_id": "71"}} + {"user": "test", "message": "some message with the number 71", "date": "2009-11-15T14:12:12", "likes": 71} + {"index":{"_id": "72"}} + {"user": "test", "message": "some message with the number 72", "date": "2009-11-15T14:12:12", "likes": 72} + {"index":{"_id": "73"}} + {"user": "test", "message": "some message with the number 73", "date": "2009-11-15T14:12:12", "likes": 73} + {"index":{"_id": "74"}} + {"user": "test", "message": "some message with the number 74", "date": "2009-11-15T14:12:12", "likes": 74} + {"index":{"_id": "75"}} + {"user": "test", "message": "some message with the number 75", "date": "2009-11-15T14:12:12", "likes": 75} + {"index":{"_id": "76"}} + {"user": "test", "message": "some message with the number 76", "date": "2009-11-15T14:12:12", "likes": 76} + {"index":{"_id": "77"}} + {"user": "test", "message": "some message with the number 77", "date": "2009-11-15T14:12:12", "likes": 77} + {"index":{"_id": "78"}} + {"user": "test", "message": "some message with the number 78", "date": "2009-11-15T14:12:12", "likes": 78} + {"index":{"_id": "79"}} + {"user": "test", "message": "some message with the number 79", "date": "2009-11-15T14:12:12", "likes": 79} + {"index":{"_id": "80"}} + {"user": "test", "message": "some message with the number 80", "date": "2009-11-15T14:12:12", "likes": 80} + {"index":{"_id": "81"}} + {"user": "test", "message": "some message with the number 81", "date": "2009-11-15T14:12:12", "likes": 81} + {"index":{"_id": "82"}} + {"user": "test", "message": "some message with the number 82", "date": "2009-11-15T14:12:12", "likes": 82} + {"index":{"_id": "83"}} + {"user": "test", "message": "some message with the number 83", "date": "2009-11-15T14:12:12", "likes": 83} + {"index":{"_id": "84"}} + {"user": "test", "message": "some message with the number 84", "date": "2009-11-15T14:12:12", "likes": 84} + {"index":{"_id": "85"}} + {"user": "test", 
"message": "some message with the number 85", "date": "2009-11-15T14:12:12", "likes": 85} + {"index":{"_id": "86"}} + {"user": "test", "message": "some message with the number 86", "date": "2009-11-15T14:12:12", "likes": 86} + {"index":{"_id": "87"}} + {"user": "test", "message": "some message with the number 87", "date": "2009-11-15T14:12:12", "likes": 87} + {"index":{"_id": "88"}} + {"user": "test", "message": "some message with the number 88", "date": "2009-11-15T14:12:12", "likes": 88} + {"index":{"_id": "89"}} + {"user": "test", "message": "some message with the number 89", "date": "2009-11-15T14:12:12", "likes": 89} + {"index":{"_id": "90"}} + {"user": "test", "message": "some message with the number 90", "date": "2009-11-15T14:12:12", "likes": 90} + {"index":{"_id": "91"}} + {"user": "test", "message": "some message with the number 91", "date": "2009-11-15T14:12:12", "likes": 91} + {"index":{"_id": "92"}} + {"user": "test", "message": "some message with the number 92", "date": "2009-11-15T14:12:12", "likes": 92} + {"index":{"_id": "93"}} + {"user": "test", "message": "some message with the number 93", "date": "2009-11-15T14:12:12", "likes": 93} + {"index":{"_id": "94"}} + {"user": "test", "message": "some message with the number 94", "date": "2009-11-15T14:12:12", "likes": 94} + {"index":{"_id": "95"}} + {"user": "test", "message": "some message with the number 95", "date": "2009-11-15T14:12:12", "likes": 95} + {"index":{"_id": "96"}} + {"user": "test", "message": "some message with the number 96", "date": "2009-11-15T14:12:12", "likes": 96} + {"index":{"_id": "97"}} + {"user": "test", "message": "some message with the number 97", "date": "2009-11-15T14:12:12", "likes": 97} + {"index":{"_id": "98"}} + {"user": "test", "message": "some message with the number 98", "date": "2009-11-15T14:12:12", "likes": 98} + {"index":{"_id": "99"}} + {"user": "test", "message": "some message with the number 99", "date": "2009-11-15T14:12:12", "likes": 99} + {"index":{"_id": 
"100"}} + {"user": "test", "message": "some message with the number 100", "date": "2009-11-15T14:12:12", "likes": 100} + {"index":{"_id": "101"}} + {"user": "test", "message": "some message with the number 101", "date": "2009-11-15T14:12:12", "likes": 101} + {"index":{"_id": "102"}} + {"user": "test", "message": "some message with the number 102", "date": "2009-11-15T14:12:12", "likes": 102} + {"index":{"_id": "103"}} + {"user": "test", "message": "some message with the number 103", "date": "2009-11-15T14:12:12", "likes": 103} + {"index":{"_id": "104"}} + {"user": "test", "message": "some message with the number 104", "date": "2009-11-15T14:12:12", "likes": 104} + {"index":{"_id": "105"}} + {"user": "test", "message": "some message with the number 105", "date": "2009-11-15T14:12:12", "likes": 105} + {"index":{"_id": "106"}} + {"user": "test", "message": "some message with the number 106", "date": "2009-11-15T14:12:12", "likes": 106} + {"index":{"_id": "107"}} + {"user": "test", "message": "some message with the number 107", "date": "2009-11-15T14:12:12", "likes": 107} + {"index":{"_id": "108"}} + {"user": "test", "message": "some message with the number 108", "date": "2009-11-15T14:12:12", "likes": 108} + {"index":{"_id": "109"}} + {"user": "test", "message": "some message with the number 109", "date": "2009-11-15T14:12:12", "likes": 109} + {"index":{"_id": "110"}} + {"user": "test", "message": "some message with the number 110", "date": "2009-11-15T14:12:12", "likes": 110} + {"index":{"_id": "111"}} + {"user": "test", "message": "some message with the number 111", "date": "2009-11-15T14:12:12", "likes": 111} + {"index":{"_id": "112"}} + {"user": "test", "message": "some message with the number 112", "date": "2009-11-15T14:12:12", "likes": 112} + {"index":{"_id": "113"}} + {"user": "test", "message": "some message with the number 113", "date": "2009-11-15T14:12:12", "likes": 113} + {"index":{"_id": "114"}} + {"user": "test", "message": "some message with the number 
114", "date": "2009-11-15T14:12:12", "likes": 114} + {"index":{"_id": "115"}} + {"user": "test", "message": "some message with the number 115", "date": "2009-11-15T14:12:12", "likes": 115} + {"index":{"_id": "116"}} + {"user": "test", "message": "some message with the number 116", "date": "2009-11-15T14:12:12", "likes": 116} + {"index":{"_id": "117"}} + {"user": "test", "message": "some message with the number 117", "date": "2009-11-15T14:12:12", "likes": 117} + {"index":{"_id": "118"}} + {"user": "test", "message": "some message with the number 118", "date": "2009-11-15T14:12:12", "likes": 118} + {"index":{"_id": "119"}} + {"user": "test", "message": "some message with the number 119", "date": "2009-11-15T14:12:12", "likes": 119} +emit snippet +query part: conflicts=proceed +handle snippet +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +query part: conflicts=proceed +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + 
{"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +query part: conflicts=proceed +body part: +{ + "query": { + "term": { + "user": "kimchy" + } + } +} +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +body part: +{ + "script": { + "inline": "ctx._source.likes++" + }, + "query": { + "term": { + "user": "kimchy" + } + } +} +handle snippet +test snippet +emit snippet +emit snippet +emit snippet +query part: wait_for_status=yellow +emit snippet +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 
2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +query part: routing=1 +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +query part: scroll_size=100 +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +body part: +{ + "description" : "sets foo", + "processors" : [ { + "set" : { + "field": "foo", + "value": "bar" + } 
+ } ] +} +emit snippet +query part: pipeline=set-foo +handle snippet +handle snippet +test snippet +emit snippet +query part: detailed=true&action=*byquery +handle snippet +handle snippet +test snippet +emit snippet +handle snippet +test snippet +emit snippet +query part: requests_per_second=unlimited +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "test": { + "dynamic": false, + "properties": { + "text": {"type": "text"} + } + } + } +} +emit snippet +query part: refresh +body part: +{ + "text": "words words", + "flag": "bar" +} +emit snippet +query part: refresh +body part: +{ + "text": "words words", + "flag": "foo" +} +emit snippet +body part: +{ + "properties": { + "text": {"type": "text"}, + "flag": {"type": "text", "analyzer": "keyword"} + } +} +handle snippet +test snippet +emit snippet +query part: filter_path=hits.total +body part: +{ + "query": { + "match": { + "flag": "foo" + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +query part: refresh&conflicts=proceed +emit snippet +query part: filter_path=hits.total +body part: +{ + "query": { + "match": { + "flag": "foo" + } + } +} +handle snippet +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} + {"index":{"_id": "5"}} + {"user": "test", "message": "some 
message with the number 5", "date": "2009-11-15T14:12:12", "likes": 5} + {"index":{"_id": "6"}} + {"user": "test", "message": "some message with the number 6", "date": "2009-11-15T14:12:12", "likes": 6} + {"index":{"_id": "7"}} + {"user": "test", "message": "some message with the number 7", "date": "2009-11-15T14:12:12", "likes": 7} + {"index":{"_id": "8"}} + {"user": "test", "message": "some message with the number 8", "date": "2009-11-15T14:12:12", "likes": 8} + {"index":{"_id": "9"}} + {"user": "test", "message": "some message with the number 9", "date": "2009-11-15T14:12:12", "likes": 9} + {"index":{"_id": "10"}} + {"user": "test", "message": "some message with the number 10", "date": "2009-11-15T14:12:12", "likes": 10} + {"index":{"_id": "11"}} + {"user": "test", "message": "some message with the number 11", "date": "2009-11-15T14:12:12", "likes": 11} + {"index":{"_id": "12"}} + {"user": "test", "message": "some message with the number 12", "date": "2009-11-15T14:12:12", "likes": 12} + {"index":{"_id": "13"}} + {"user": "test", "message": "some message with the number 13", "date": "2009-11-15T14:12:12", "likes": 13} + {"index":{"_id": "14"}} + {"user": "test", "message": "some message with the number 14", "date": "2009-11-15T14:12:12", "likes": 14} + {"index":{"_id": "15"}} + {"user": "test", "message": "some message with the number 15", "date": "2009-11-15T14:12:12", "likes": 15} + {"index":{"_id": "16"}} + {"user": "test", "message": "some message with the number 16", "date": "2009-11-15T14:12:12", "likes": 16} + {"index":{"_id": "17"}} + {"user": "test", "message": "some message with the number 17", "date": "2009-11-15T14:12:12", "likes": 17} + {"index":{"_id": "18"}} + {"user": "test", "message": "some message with the number 18", "date": "2009-11-15T14:12:12", "likes": 18} + {"index":{"_id": "19"}} + {"user": "test", "message": "some message with the number 19", "date": "2009-11-15T14:12:12", "likes": 19} + {"index":{"_id": "20"}} + {"user": "test", 
"message": "some message with the number 20", "date": "2009-11-15T14:12:12", "likes": 20} + {"index":{"_id": "21"}} + {"user": "test", "message": "some message with the number 21", "date": "2009-11-15T14:12:12", "likes": 21} + {"index":{"_id": "22"}} + {"user": "test", "message": "some message with the number 22", "date": "2009-11-15T14:12:12", "likes": 22} + {"index":{"_id": "23"}} + {"user": "test", "message": "some message with the number 23", "date": "2009-11-15T14:12:12", "likes": 23} + {"index":{"_id": "24"}} + {"user": "test", "message": "some message with the number 24", "date": "2009-11-15T14:12:12", "likes": 24} + {"index":{"_id": "25"}} + {"user": "test", "message": "some message with the number 25", "date": "2009-11-15T14:12:12", "likes": 25} + {"index":{"_id": "26"}} + {"user": "test", "message": "some message with the number 26", "date": "2009-11-15T14:12:12", "likes": 26} + {"index":{"_id": "27"}} + {"user": "test", "message": "some message with the number 27", "date": "2009-11-15T14:12:12", "likes": 27} + {"index":{"_id": "28"}} + {"user": "test", "message": "some message with the number 28", "date": "2009-11-15T14:12:12", "likes": 28} + {"index":{"_id": "29"}} + {"user": "test", "message": "some message with the number 29", "date": "2009-11-15T14:12:12", "likes": 29} + {"index":{"_id": "30"}} + {"user": "test", "message": "some message with the number 30", "date": "2009-11-15T14:12:12", "likes": 30} + {"index":{"_id": "31"}} + {"user": "test", "message": "some message with the number 31", "date": "2009-11-15T14:12:12", "likes": 31} + {"index":{"_id": "32"}} + {"user": "test", "message": "some message with the number 32", "date": "2009-11-15T14:12:12", "likes": 32} + {"index":{"_id": "33"}} + {"user": "test", "message": "some message with the number 33", "date": "2009-11-15T14:12:12", "likes": 33} + {"index":{"_id": "34"}} + {"user": "test", "message": "some message with the number 34", "date": "2009-11-15T14:12:12", "likes": 34} + {"index":{"_id": 
"35"}} + {"user": "test", "message": "some message with the number 35", "date": "2009-11-15T14:12:12", "likes": 35} + {"index":{"_id": "36"}} + {"user": "test", "message": "some message with the number 36", "date": "2009-11-15T14:12:12", "likes": 36} + {"index":{"_id": "37"}} + {"user": "test", "message": "some message with the number 37", "date": "2009-11-15T14:12:12", "likes": 37} + {"index":{"_id": "38"}} + {"user": "test", "message": "some message with the number 38", "date": "2009-11-15T14:12:12", "likes": 38} + {"index":{"_id": "39"}} + {"user": "test", "message": "some message with the number 39", "date": "2009-11-15T14:12:12", "likes": 39} + {"index":{"_id": "40"}} + {"user": "test", "message": "some message with the number 40", "date": "2009-11-15T14:12:12", "likes": 40} + {"index":{"_id": "41"}} + {"user": "test", "message": "some message with the number 41", "date": "2009-11-15T14:12:12", "likes": 41} + {"index":{"_id": "42"}} + {"user": "test", "message": "some message with the number 42", "date": "2009-11-15T14:12:12", "likes": 42} + {"index":{"_id": "43"}} + {"user": "test", "message": "some message with the number 43", "date": "2009-11-15T14:12:12", "likes": 43} + {"index":{"_id": "44"}} + {"user": "test", "message": "some message with the number 44", "date": "2009-11-15T14:12:12", "likes": 44} + {"index":{"_id": "45"}} + {"user": "test", "message": "some message with the number 45", "date": "2009-11-15T14:12:12", "likes": 45} + {"index":{"_id": "46"}} + {"user": "test", "message": "some message with the number 46", "date": "2009-11-15T14:12:12", "likes": 46} + {"index":{"_id": "47"}} + {"user": "test", "message": "some message with the number 47", "date": "2009-11-15T14:12:12", "likes": 47} + {"index":{"_id": "48"}} + {"user": "test", "message": "some message with the number 48", "date": "2009-11-15T14:12:12", "likes": 48} + {"index":{"_id": "49"}} + {"user": "test", "message": "some message with the number 49", "date": "2009-11-15T14:12:12", 
"likes": 49} + {"index":{"_id": "50"}} + {"user": "test", "message": "some message with the number 50", "date": "2009-11-15T14:12:12", "likes": 50} + {"index":{"_id": "51"}} + {"user": "test", "message": "some message with the number 51", "date": "2009-11-15T14:12:12", "likes": 51} + {"index":{"_id": "52"}} + {"user": "test", "message": "some message with the number 52", "date": "2009-11-15T14:12:12", "likes": 52} + {"index":{"_id": "53"}} + {"user": "test", "message": "some message with the number 53", "date": "2009-11-15T14:12:12", "likes": 53} + {"index":{"_id": "54"}} + {"user": "test", "message": "some message with the number 54", "date": "2009-11-15T14:12:12", "likes": 54} + {"index":{"_id": "55"}} + {"user": "test", "message": "some message with the number 55", "date": "2009-11-15T14:12:12", "likes": 55} + {"index":{"_id": "56"}} + {"user": "test", "message": "some message with the number 56", "date": "2009-11-15T14:12:12", "likes": 56} + {"index":{"_id": "57"}} + {"user": "test", "message": "some message with the number 57", "date": "2009-11-15T14:12:12", "likes": 57} + {"index":{"_id": "58"}} + {"user": "test", "message": "some message with the number 58", "date": "2009-11-15T14:12:12", "likes": 58} + {"index":{"_id": "59"}} + {"user": "test", "message": "some message with the number 59", "date": "2009-11-15T14:12:12", "likes": 59} + {"index":{"_id": "60"}} + {"user": "test", "message": "some message with the number 60", "date": "2009-11-15T14:12:12", "likes": 60} + {"index":{"_id": "61"}} + {"user": "test", "message": "some message with the number 61", "date": "2009-11-15T14:12:12", "likes": 61} + {"index":{"_id": "62"}} + {"user": "test", "message": "some message with the number 62", "date": "2009-11-15T14:12:12", "likes": 62} + {"index":{"_id": "63"}} + {"user": "test", "message": "some message with the number 63", "date": "2009-11-15T14:12:12", "likes": 63} + {"index":{"_id": "64"}} + {"user": "test", "message": "some message with the number 64", 
"date": "2009-11-15T14:12:12", "likes": 64} + {"index":{"_id": "65"}} + {"user": "test", "message": "some message with the number 65", "date": "2009-11-15T14:12:12", "likes": 65} + {"index":{"_id": "66"}} + {"user": "test", "message": "some message with the number 66", "date": "2009-11-15T14:12:12", "likes": 66} + {"index":{"_id": "67"}} + {"user": "test", "message": "some message with the number 67", "date": "2009-11-15T14:12:12", "likes": 67} + {"index":{"_id": "68"}} + {"user": "test", "message": "some message with the number 68", "date": "2009-11-15T14:12:12", "likes": 68} + {"index":{"_id": "69"}} + {"user": "test", "message": "some message with the number 69", "date": "2009-11-15T14:12:12", "likes": 69} + {"index":{"_id": "70"}} + {"user": "test", "message": "some message with the number 70", "date": "2009-11-15T14:12:12", "likes": 70} + {"index":{"_id": "71"}} + {"user": "test", "message": "some message with the number 71", "date": "2009-11-15T14:12:12", "likes": 71} + {"index":{"_id": "72"}} + {"user": "test", "message": "some message with the number 72", "date": "2009-11-15T14:12:12", "likes": 72} + {"index":{"_id": "73"}} + {"user": "test", "message": "some message with the number 73", "date": "2009-11-15T14:12:12", "likes": 73} + {"index":{"_id": "74"}} + {"user": "test", "message": "some message with the number 74", "date": "2009-11-15T14:12:12", "likes": 74} + {"index":{"_id": "75"}} + {"user": "test", "message": "some message with the number 75", "date": "2009-11-15T14:12:12", "likes": 75} + {"index":{"_id": "76"}} + {"user": "test", "message": "some message with the number 76", "date": "2009-11-15T14:12:12", "likes": 76} + {"index":{"_id": "77"}} + {"user": "test", "message": "some message with the number 77", "date": "2009-11-15T14:12:12", "likes": 77} + {"index":{"_id": "78"}} + {"user": "test", "message": "some message with the number 78", "date": "2009-11-15T14:12:12", "likes": 78} + {"index":{"_id": "79"}} + {"user": "test", "message": "some 
message with the number 79", "date": "2009-11-15T14:12:12", "likes": 79} + {"index":{"_id": "80"}} + {"user": "test", "message": "some message with the number 80", "date": "2009-11-15T14:12:12", "likes": 80} + {"index":{"_id": "81"}} + {"user": "test", "message": "some message with the number 81", "date": "2009-11-15T14:12:12", "likes": 81} + {"index":{"_id": "82"}} + {"user": "test", "message": "some message with the number 82", "date": "2009-11-15T14:12:12", "likes": 82} + {"index":{"_id": "83"}} + {"user": "test", "message": "some message with the number 83", "date": "2009-11-15T14:12:12", "likes": 83} + {"index":{"_id": "84"}} + {"user": "test", "message": "some message with the number 84", "date": "2009-11-15T14:12:12", "likes": 84} + {"index":{"_id": "85"}} + {"user": "test", "message": "some message with the number 85", "date": "2009-11-15T14:12:12", "likes": 85} + {"index":{"_id": "86"}} + {"user": "test", "message": "some message with the number 86", "date": "2009-11-15T14:12:12", "likes": 86} + {"index":{"_id": "87"}} + {"user": "test", "message": "some message with the number 87", "date": "2009-11-15T14:12:12", "likes": 87} + {"index":{"_id": "88"}} + {"user": "test", "message": "some message with the number 88", "date": "2009-11-15T14:12:12", "likes": 88} + {"index":{"_id": "89"}} + {"user": "test", "message": "some message with the number 89", "date": "2009-11-15T14:12:12", "likes": 89} + {"index":{"_id": "90"}} + {"user": "test", "message": "some message with the number 90", "date": "2009-11-15T14:12:12", "likes": 90} + {"index":{"_id": "91"}} + {"user": "test", "message": "some message with the number 91", "date": "2009-11-15T14:12:12", "likes": 91} + {"index":{"_id": "92"}} + {"user": "test", "message": "some message with the number 92", "date": "2009-11-15T14:12:12", "likes": 92} + {"index":{"_id": "93"}} + {"user": "test", "message": "some message with the number 93", "date": "2009-11-15T14:12:12", "likes": 93} + {"index":{"_id": "94"}} + {"user": 
"test", "message": "some message with the number 94", "date": "2009-11-15T14:12:12", "likes": 94} + {"index":{"_id": "95"}} + {"user": "test", "message": "some message with the number 95", "date": "2009-11-15T14:12:12", "likes": 95} + {"index":{"_id": "96"}} + {"user": "test", "message": "some message with the number 96", "date": "2009-11-15T14:12:12", "likes": 96} + {"index":{"_id": "97"}} + {"user": "test", "message": "some message with the number 97", "date": "2009-11-15T14:12:12", "likes": 97} + {"index":{"_id": "98"}} + {"user": "test", "message": "some message with the number 98", "date": "2009-11-15T14:12:12", "likes": 98} + {"index":{"_id": "99"}} + {"user": "test", "message": "some message with the number 99", "date": "2009-11-15T14:12:12", "likes": 99} + {"index":{"_id": "100"}} + {"user": "test", "message": "some message with the number 100", "date": "2009-11-15T14:12:12", "likes": 100} + {"index":{"_id": "101"}} + {"user": "test", "message": "some message with the number 101", "date": "2009-11-15T14:12:12", "likes": 101} + {"index":{"_id": "102"}} + {"user": "test", "message": "some message with the number 102", "date": "2009-11-15T14:12:12", "likes": 102} + {"index":{"_id": "103"}} + {"user": "test", "message": "some message with the number 103", "date": "2009-11-15T14:12:12", "likes": 103} + {"index":{"_id": "104"}} + {"user": "test", "message": "some message with the number 104", "date": "2009-11-15T14:12:12", "likes": 104} + {"index":{"_id": "105"}} + {"user": "test", "message": "some message with the number 105", "date": "2009-11-15T14:12:12", "likes": 105} + {"index":{"_id": "106"}} + {"user": "test", "message": "some message with the number 106", "date": "2009-11-15T14:12:12", "likes": 106} + {"index":{"_id": "107"}} + {"user": "test", "message": "some message with the number 107", "date": "2009-11-15T14:12:12", "likes": 107} + {"index":{"_id": "108"}} + {"user": "test", "message": "some message with the number 108", "date": 
"2009-11-15T14:12:12", "likes": 108} + {"index":{"_id": "109"}} + {"user": "test", "message": "some message with the number 109", "date": "2009-11-15T14:12:12", "likes": 109} + {"index":{"_id": "110"}} + {"user": "test", "message": "some message with the number 110", "date": "2009-11-15T14:12:12", "likes": 110} + {"index":{"_id": "111"}} + {"user": "test", "message": "some message with the number 111", "date": "2009-11-15T14:12:12", "likes": 111} + {"index":{"_id": "112"}} + {"user": "test", "message": "some message with the number 112", "date": "2009-11-15T14:12:12", "likes": 112} + {"index":{"_id": "113"}} + {"user": "test", "message": "some message with the number 113", "date": "2009-11-15T14:12:12", "likes": 113} + {"index":{"_id": "114"}} + {"user": "test", "message": "some message with the number 114", "date": "2009-11-15T14:12:12", "likes": 114} + {"index":{"_id": "115"}} + {"user": "test", "message": "some message with the number 115", "date": "2009-11-15T14:12:12", "likes": 115} + {"index":{"_id": "116"}} + {"user": "test", "message": "some message with the number 116", "date": "2009-11-15T14:12:12", "likes": 116} + {"index":{"_id": "117"}} + {"user": "test", "message": "some message with the number 117", "date": "2009-11-15T14:12:12", "likes": 117} + {"index":{"_id": "118"}} + {"user": "test", "message": "some message with the number 118", "date": "2009-11-15T14:12:12", "likes": 118} + {"index":{"_id": "119"}} + {"user": "test", "message": "some message with the number 119", "date": "2009-11-15T14:12:12", "likes": 119} +emit snippet +body part: +{ + "source": { + "index": "twitter" + }, + "dest": { + "index": "new_twitter" + } +} +handle snippet +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the 
number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +body part: +{ + "source": { + "index": "twitter" + }, + "dest": { + "index": "new_twitter", + "version_type": "internal" + } +} +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +body part: +{ + "source": { + "index": "twitter" + }, + "dest": { + "index": "new_twitter", + "version_type": "external" + } +} +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + 
{"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +body part: +{ + "source": { + "index": "twitter" + }, + "dest": { + "index": "new_twitter", + "op_type": "create" + } +} +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +body part: +{ + "conflicts": "proceed", + "source": { + "index": "twitter" + }, + "dest": { + "index": "new_twitter", + "op_type": "create" + } +} +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", 
"message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +body part: +{ + "source": { + "index": "twitter", + "type": "tweet", + "query": { + "term": { + "user": "kimchy" + } + } + }, + "dest": { + "index": "new_twitter" + } +} +handle snippet +test snippet +emit snippet +emit snippet +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "source": { + "index": ["twitter", "blog"], + "type": ["tweet", "post"] + }, + "dest": { + "index": "all_together" + } +} +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +body part: +{ + "size": 1, + "source": { + "index": "twitter" + }, + "dest": { + "index": "new_twitter" + } +} +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the 
number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +body part: +{ + "size": 10000, + "source": { + "index": "twitter", + "sort": { "date": "desc" } + }, + "dest": { + "index": "new_twitter" + } +} +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +body part: +{ + "source": { + "index": "twitter" + }, + "dest": { + "index": "new_twitter", + "version_type": "external" + }, + "script": { + "inline": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}" + } +} +handle snippet +test snippet +emit snippet +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "source": { + "index": "source", + "query": { + "match": { + "company": "cat" + } + } + }, + "dest": { + "index": "dest", + "routing": "=cat" + } +} +handle snippet +test snippet +emit snippet +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + "source": { + "index": "source", + "size": 100 + }, + "dest": { + "index": "dest", + "routing": "=cat" + } +} +handle snippet +test snippet +emit snippet +emit snippet +query part: wait_for_status=yellow +emit snippet +body part: +{ + 
"source": { + "index": "source" + }, + "dest": { + "index": "dest", + "pipeline": "some_ingest_pipeline" + } +} +handle snippet +handle snippet +test snippet +emit snippet +query part: detailed=true&actions=*reindex +handle snippet +handle snippet +test snippet +emit snippet +handle snippet +test snippet +emit snippet +query part: requests_per_second=unlimited +handle snippet +test snippet +emit snippet +query part: refresh +body part: +{ + "text": "words words", + "flag": "foo" +} +handle snippet +test snippet +emit snippet +body part: +{ + "source": { + "index": "test" + }, + "dest": { + "index": "test2" + }, + "script": { + "inline": "ctx._source.tag = ctx._source.remove(\"flag\")" + } +} +handle snippet +test snippet +emit snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +catch part: request +query part: pipeline=my_pipeline_id +body part: +{ + "foo": "bar" +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet 
+handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "transient": { + "cluster.routing.allocation.enable": "none" + } +} +handle snippet +test snippet +emit snippet +handle snippet +test snippet +emit snippet +handle snippet +test snippet +emit snippet +body part: +{ + "transient": { + "cluster.routing.allocation.enable": "all" + } +} +handle snippet +test snippet +emit snippet +handle snippet +test snippet +emit snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "persistent": { + "cluster.routing.allocation.enable": "none" + } +} +handle snippet +test snippet +emit snippet +handle snippet +test snippet +emit snippet +emit snippet +handle snippet +test snippet +emit snippet +body part: +{ + "persistent": { + "cluster.routing.allocation.enable": "all" + } +} +handle snippet +test snippet +emit snippet +emit snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "user": { + "_all": { "enabled": false }, + "properties": { + "title": { "type": "text" }, + "name": { "type": "text" }, + "age": { "type": "integer" } + } + }, + "blogpost": { + "_all": { "enabled": false }, + "properties": { + "title": { "type": "text" }, + "body": { "type": "text" }, + "user_id": { + "type": "keyword" + }, + "created": { + "type": "date", + "format": "strict_date_optional_time||epoch_millis" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ "count": 5 } +handle snippet +test snippet +emit snippet +body part: +{ + "index.mapper.dynamic":false +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "session": { + "properties": { + "user_id": { + "type": "keyword" + }, + "last_updated": { + "type": "date" + }, + "session_data": { + "enabled": false + } + } + } + } +} 
+emit snippet +body part: +{ + "user_id": "kimchy", + "session_data": { + "arbitrary_object": { + "some_array": [ "foo", "bar", { "baz": 2 } ] + } + }, + "last_updated": "2015-12-06T18:20:22" +} +emit snippet +body part: +{ + "user_id": "jpountz", + "session_data": "none", + "last_updated": "2015-12-06T18:22:13" +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "session": { + "enabled": false + } + } +} +emit snippet +body part: +{ + "user_id": "kimchy", + "session_data": { + "arbitrary_object": { + "some_array": [ "foo", "bar", { "baz": 2 } ] + } + }, + "last_updated": "2015-12-06T18:20:22" +} +emit snippet +emit snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "location": { + "type": "geo_point", + "geohash_prefix": true, + "geohash_precision": 6 + } + } + } + } +} +emit snippet +body part: +{ + "location": { + "lat": 41.12, + "lon": -71.34 + } +} +emit snippet +query part: fielddata_fields=location.geohash +body part: +{ + "query": { + "term": { + "location.geohash": "drm3bt" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "date": { + "type": "date", + "format": "yyyy-MM-dd" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "first_name": { + "type": "text", + "copy_to": "full_name" + }, + "last_name": { + "type": "text", + "copy_to": "full_name" + }, + "full_name": { + "type": "text" + } + } + } + } +} +emit snippet +body part: +{ + "first_name": "John", + "last_name": "Smith" +} +emit snippet +body part: +{ + "query": { + "match": { + "full_name": { + "query": "John Smith", + "operator": "and" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "title": { + "type": "text", + "store": true + }, + "date": { + "type": "date", + "store": true + }, 
+ "content": { + "type": "text" + } + } + } + } +} +emit snippet +body part: +{ + "title": "Some short title", + "date": "2015-01-01", + "content": "A very long content field..." +} +emit snippet +body part: +{ + "fields": [ "title", "date" ] +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "status_code": { + "type": "keyword", + "null_value": "NULL" + } + } + } + } +} +emit snippet +body part: +{ + "status_code": null +} +emit snippet +body part: +{ + "status_code": [] +} +emit snippet +body part: +{ + "query": { + "term": { + "status_code": "NULL" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "number_one": { + "type": "integer" + }, + "number_two": { + "type": "integer", + "coerce": false + } + } + } + } +} +emit snippet +body part: +{ + "number_one": "10" +} +emit snippet +catch part: request +body part: +{ + "number_two": "10" +} +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "index.mapping.coerce": false + }, + "mappings": { + "my_type": { + "properties": { + "number_one": { + "type": "integer", + "coerce": true + }, + "number_two": { + "type": "integer" + } + } + } + } +} +emit snippet +body part: +{ "number_one": "10" } +emit snippet +catch part: request +body part: +{ "number_two": "10" } +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "analysis": { + "filter": { + "autocomplete_filter": { + "type": "edge_ngram", + "min_gram": 1, + "max_gram": 20 + } + }, + "analyzer": { + "autocomplete": { + "type": "custom", + "tokenizer": "standard", + "filter": [ + "lowercase", + "autocomplete_filter" + ] + } + } + } + }, + "mappings": { + "my_type": { + "properties": { + "text": { + "type": "text", + "analyzer": "autocomplete", + "search_analyzer": "standard" + } + } + } + } +} +emit snippet +body part: +{ + "text": "Quick Brown Fox" +} +emit snippet +body part: +{ + 
"query": { + "match": { + "text": { + "query": "Quick Br", + "operator": "and" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "text": { + "type": "text", + "term_vector": "with_positions_offsets" + } + } + } + } +} +emit snippet +body part: +{ + "text": "Quick brown fox" +} +emit snippet +body part: +{ + "query": { + "match": { + "text": "brown fox" + } + }, + "highlight": { + "fields": { + "text": {} + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "city": { + "type": "text", + "fields": { + "raw": { + "type": "keyword" + } + } + } + } + } + } +} +emit snippet +body part: +{ + "city": "New York" +} +emit snippet +body part: +{ + "city": "York" +} +emit snippet +body part: +{ + "query": { + "match": { + "city": "york" + } + }, + "sort": { + "city.raw": "asc" + }, + "aggs": { + "Cities": { + "terms": { + "field": "city.raw" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "text": { + "type": "text", + "fields": { + "english": { + "type": "text", + "analyzer": "english" + } + } + } + } + } + } +} +emit snippet +body part: +{ "text": "quick brown fox" } +emit snippet +body part: +{ "text": "quick brown foxes" } +emit snippet +body part: +{ + "query": { + "multi_match": { + "query": "quick brown foxes", + "fields": [ + "text", + "text.english" + ], + "type": "most_fields" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "tag": { + "type": "text", + "fielddata": true, + "fielddata_frequency_filter": { + "min": 0.001, + "max": 0.1, + "min_segment_size": 500 + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "number_one": { + "type": "integer", + "ignore_malformed": true + }, + "number_two": { + 
"type": "integer" + } + } + } + } +} +emit snippet +body part: +{ + "text": "Some text value", + "number_one": "foo" +} +emit snippet +catch part: request +body part: +{ + "text": "Some text value", + "number_two": "foo" +} +handle snippet +test snippet +emit snippet +body part: +{ + "settings": { + "index.mapping.ignore_malformed": true + }, + "mappings": { + "my_type": { + "properties": { + "number_one": { + "type": "byte" + }, + "number_two": { + "type": "integer", + "ignore_malformed": false + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "username": "johnsmith", + "name": { + "first": "John", + "last": "Smith" + } +} +emit snippet +emit snippet +body part: +{ + "username": "marywhite", + "email": "mary@white.com", + "name": { + "first": "Mary", + "middle": "Alice", + "last": "White" + } +} +emit snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "dynamic": false, + "properties": { + "user": { + "properties": { + "name": { + "type": "text" + }, + "social_networks": { + "dynamic": true, + "properties": {} + } + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "names": [ "John Abraham", "Lincoln Smith"] +} +emit snippet +body part: +{ + "query": { + "match_phrase": { + "names": { + "query": "Abraham Lincoln" + } + } + } +} +emit snippet +body part: +{ + "query": { + "match_phrase": { + "names": { + "query": "Abraham Lincoln", + "slop": 101 + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "groups": { + "properties": { + "names": { + "type": "text", + "position_increment_gap": 0 + } + } + } + } +} +emit snippet +body part: +{ + "names": [ "John Abraham", "Lincoln Smith"] +} +emit snippet +body part: +{ + "query": { + "match_phrase": { + "names": "Abraham Lincoln" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "message": { + "type": 
"keyword", + "ignore_above": 20 + } + } + } + } +} +emit snippet +body part: +{ + "message": "Syntax error" +} +emit snippet +body part: +{ + "message": "Syntax error with some long stacktrace" +} +emit snippet +body part: +{ + "aggs": { + "messages": { + "terms": { + "field": "message" + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "location": { + "type": "geo_point", + "lat_lon": true + } + } + } + } +} +emit snippet +body part: +{ + "location": { + "lat": 41.12, + "lon": -71.34 + } +} +emit snippet +body part: +{ + "query": { + "geo_distance": { + "location": { + "lat": 41, + "lon": -71 + }, + "distance": "50km", + "optimize_bbox": "indexed" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "location": { + "type": "geo_point", + "geohash_prefix": true, + "geohash_precision": 6 + } + } + } + } +} +emit snippet +body part: +{ + "location": { + "lat": 41.12, + "lon": -71.34 + } +} +emit snippet +query part: fielddata_fields=location.geohash +body part: +{ + "query": { + "geohash_cell": { + "location": { + "lat": 41.02, + "lon": -71.48 + }, + "precision": 4, + "neighbors": true + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "status_code": { + "type": "keyword" + }, + "session_id": { + "type": "keyword", + "doc_values": false + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "text": { + "type": "text", + "index_options": "offsets" + } + } + } + } +} +emit snippet +body part: +{ + "text": "Quick brown fox" +} +emit snippet +body part: +{ + "query": { + "match": { + "text": "brown fox" + } + }, + "highlight": { + "fields": { + "text": {} + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "manager": { + 
"properties": { + "age": { "type": "integer" }, + "name": { "type": "text" } + } + }, + "employees": { + "type": "nested", + "properties": { + "age": { "type": "integer" }, + "name": { "type": "text" } + } + } + } + } + } +} +emit snippet +body part: +{ + "region": "US", + "manager": { + "name": "Alice White", + "age": 30 + }, + "employees": [ + { + "name": "John Smith", + "age": 34 + }, + { + "name": "Peter Brown", + "age": 26 + } + ] +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match": { + "manager.name": "Alice White" + } + }, + "aggs": { + "Employees": { + "nested": { + "path": "employees" + }, + "aggs": { + "Employee Ages": { + "histogram": { + "field": "employees.age", + "interval": 5 + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "title": { + "type": "text" + }, + "content": { + "type": "text" + }, + "date": { + "type": "date", + "include_in_all": false + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "include_in_all": false, + "properties": { + "title": { "type": "text" }, + "author": { + "include_in_all": true, + "properties": { + "first_name": { "type": "text" }, + "last_name": { "type": "text" } + } + }, + "editor": { + "properties": { + "first_name": { "type": "text" }, + "last_name": { "type": "text", "include_in_all": true } + } + } + } + } + } +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +emit snippet +body part: +{ + "properties": { + "title": { + "type": "text", + "norms": false + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "title": { + "type": "text", + "boost": 2 + }, + "content": { + "type": "text" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match" : { + "title": { + "query": "quick brown fox" + } + } + } +} 
+handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "match" : { + "title": { + "query": "quick brown fox", + "boost": 2 + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "default_field": { + "type": "text" + }, + "bm25_field": { + "type": "text", + "similarity": "BM25" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "location": { + "type": "geo_point", + "geohash": true + } + } + } + } +} +emit snippet +body part: +{ + "location": { + "lat": 41.12, + "lon": -71.34 + } +} +emit snippet +query part: fielddata_fields=location.geohash +body part: +{ + "query": { + "prefix": { + "location.geohash": "drm3b" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "text": { + "type": "text", + "fields": { + "english": { + "type": "text", + "analyzer": "english" + } + } + } + } + } + } +} +emit snippet +query part: wait_for_status=yellow +emit snippet +query part: field=text +body part: +{ + "text": "The quick Brown Foxes." +} +emit snippet +query part: field=text.english +body part: +{ + "text": "The quick Brown Foxes." 
+} +handle snippet +test snippet +emit snippet +body part: +{ + "settings":{ + "analysis":{ + "analyzer":{ + "my_analyzer":{ + "type":"custom", + "tokenizer":"standard", + "filter":[ + "lowercase" + ] + }, + "my_stop_analyzer":{ + "type":"custom", + "tokenizer":"standard", + "filter":[ + "lowercase", + "english_stop" + ] + } + }, + "filter":{ + "english_stop":{ + "type":"stop", + "stopwords":"_english_" + } + } + } + }, + "mappings":{ + "my_type":{ + "properties":{ + "title": { + "type":"text", + "analyzer":"my_analyzer", + "search_analyzer":"my_stop_analyzer", + "search_quote_analyzer":"my_analyzer" + } + } + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "region": "US", + "manager": { + "age": 30, + "name": { + "first": "John", + "last": "Smith" + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "region": { + "type": "keyword" + }, + "manager": { + "properties": { + "age": { "type": "integer" }, + "name": { + "properties": { + "first": { "type": "text" }, + "last": { "type": "text" } + } + } + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "date": { + "type": "date" + } + } + } + } +} +emit snippet +body part: +{ "date": "2015-01-01" } +emit snippet +body part: +{ "date": "2015-01-01T12:10:30Z" } +emit snippet +body part: +{ "date": 1420070400001 } +emit snippet +body part: +{ + "sort": { "date": "asc"} +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "date": { + "type": "date", + "format": "yyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "full_name": { + "type": "text" + } + } + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + 
"mappings": { + "my_type": { + "properties": { + "name": { + "type": "text", + "fields": { + "length": { + "type": "token_count", + "analyzer": "standard" + } + } + } + } + } + } +} +emit snippet +body part: +{ "name": "John Smith" } +emit snippet +body part: +{ "name": "Rachel Alice Williams" } +emit snippet +body part: +{ + "query": { + "term": { + "name.length": 3 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "group" : "fans", + "user" : [ + { + "first" : "John", + "last" : "Smith" + }, + { + "first" : "Alice", + "last" : "White" + } + ] +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "bool": { + "must": [ + { "match": { "user.first": "Alice" }}, + { "match": { "user.last": "Smith" }} + ] + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "user": { + "type": "nested" + } + } + } + } +} +emit snippet +body part: +{ + "group" : "fans", + "user" : [ + { + "first" : "John", + "last" : "Smith" + }, + { + "first" : "Alice", + "last" : "White" + } + ] +} +emit snippet +body part: +{ + "query": { + "nested": { + "path": "user", + "query": { + "bool": { + "must": [ + { "match": { "user.first": "Alice" }}, + { "match": { "user.last": "Smith" }} + ] + } + } + } + } +} +emit snippet +body part: +{ + "query": { + "nested": { + "path": "user", + "query": { + "bool": { + "must": [ + { "match": { "user.first": "Alice" }}, + { "match": { "user.last": "White" }} + ] + } + }, + "inner_hits": { + "highlight": { + "fields": { + "user.first": {} + } + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "is_published": { + "type": "boolean" + } + } + } + } +} +emit snippet +body part: +{ + "is_published": true +} +emit snippet +body part: +{ + "query": { + "term": { + "is_published": 1 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "is_published": 
true +} +emit snippet +body part: +{ + "is_published": false +} +emit snippet +body part: +{ + "aggs": { + "publish_state": { + "terms": { + "field": "is_published" + } + } + }, + "script_fields": { + "is_published": { + "script": "doc['is_published'].value" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "number_of_bytes": { + "type": "integer" + }, + "time_in_seconds": { + "type": "float" + } + } + } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "tags": { + "type": "keyword" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } +} +emit snippet +body part: +{ + "text": "Geo-point as an object", + "location": { + "lat": 41.12, + "lon": -71.34 + } +} +emit snippet +body part: +{ + "text": "Geo-point as a string", + "location": "41.12,-71.34" +} +emit snippet +body part: +{ + "text": "Geo-point as a geohash", + "location": "drm3btev3e86" +} +emit snippet +body part: +{ + "text": "Geo-point as an array", + "location": [ -71.34, 41.12 ] +} +emit snippet +body part: +{ + "query": { + "geo_bounding_box": { + "location": { + "top_left": { + "lat": 42, + "lon": -72 + }, + "bottom_right": { + "lat": 40, + "lon": -74 + } + } + } + } +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "message": "some arrays in this document...", + "tags": [ "elasticsearch", "wow" ], + "lists": [ + { + "name": "prog_list", + "description": "programming list" + }, + { + "name": "cool_list", + "description": "cool stuff list" + } + ] +} +emit snippet +body part: +{ + "message": 
"no arrays in this document...", + "tags": "elasticsearch", + "lists": { + "name": "prog_list", + "description": "programming list" + } +} +emit snippet +body part: +{ + "query": { + "match": { + "tags": "elasticsearch" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "properties": { + "ip_addr": { + "type": "ip" + } + } + } + } +} +emit snippet +body part: +{ + "ip_addr": "192.168.1.1" +} +emit snippet +body part: +{ + "query": { + "term": { + "ip_addr": "192.168.0.0/16" + } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "dynamic_templates": [ + { + "integers": { + "match_mapping_type": "long", + "mapping": { + "type": "integer" + } + } + }, + { + "strings": { + "match_mapping_type": "string", + "mapping": { + "type": "text", + "fields": { + "raw": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + } + ] + } + } +} +emit snippet +body part: +{ + "my_integer": 5, + "my_string": "Some string" +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "dynamic_templates": [ + { + "longs_as_strings": { + "match_mapping_type": "string", + "match": "long_*", + "unmatch": "*_text", + "mapping": { + "type": "long" + } + } + } + ] + } + } +} +emit snippet +body part: +{ + "long_num": "5", + "long_text": "foo" +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "dynamic_templates": [ + { + "full_name": { + "path_match": "name.*", + "path_unmatch": "*.middle", + "mapping": { + "type": "text", + "copy_to": "full_name" + } + } + } + ] + } + } +} +emit snippet +body part: +{ + "name": { + "first": "Alice", + "middle": "Mary", + "last": "White" + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "dynamic_templates": [ + { + 
"named_analyzers": { + "match_mapping_type": "string", + "match": "*", + "mapping": { + "type": "text", + "analyzer": "{name}" + } + } + }, + { + "no_doc_values": { + "match_mapping_type":"*", + "mapping": { + "type": "{dynamic_type}", + "doc_values": false + } + } + } + ] + } + } +} +emit snippet +body part: +{ + "english": "Some English text", + "count": 5 +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "order": 0, + "template": "*", + "mappings": { + "_default_": { + "_all": { + "enabled": false + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "create_date": "2015/09/02" +} +emit snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "date_detection": false + } + } +} +emit snippet +body part: +{ + "create": "2015/09/02" +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "dynamic_date_formats": ["MM/dd/yyyy"] + } + } +} +emit snippet +body part: +{ + "create_date": "09/25/2015" +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "numeric_detection": true + } + } +} +emit snippet +body part: +{ + "my_float": "1.0", + "my_integer": "1" +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "_default_": { + "_all": { + "enabled": false + } + }, + "user": {}, + "blogpost": { + "_all": { + "enabled": true + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "template": "logs-*", + "settings": { "number_of_shards": 1 }, + "mappings": { + "_default_": { + "_all": { + "enabled": false + }, + "dynamic_templates": [ + { + "strings": { + "match_mapping_type": "string", + "mapping": { + "type": "text", + "fields": { + "raw": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + } + ] + } + } +} +emit snippet +body part: +{ "message": "error:16" } +handle snippet +test snippet +emit 
snippet +body part: +{ + "mappings": { + "my_type": { + "_timestamp": { + "enabled": true + } + } + } +} +emit snippet +query part: timestamp=2015-01-01 +body part: +{ "text": "Timestamp as a formatted date" } +emit snippet +query part: timestamp=1420070400000 +body part: +{ "text": "Timestamp as milliseconds since the epoch" } +emit snippet +body part: +{ "text": "Autogenerated timestamp set to now()" } +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "range": { + "_timestamp": { + "gte": "2015-01-01" + } + } + }, + "aggs": { + "Timestamps": { + "terms": { + "field": "_timestamp", + "size": 10 + } + } + }, + "sort": [ + { + "_timestamp": { + "order": "desc" + } + } + ], + "script_fields": { + "Timestamp": { + "script": "doc['_timestamp']" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "first_name": "John", + "last_name": "Smith", + "date_of_birth": "1970-10-24" +} +emit snippet +body part: +{ + "query": { + "match": { + "_all": "john smith 1970" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "query_string": { + "query": "john smith 1970" + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "type_1": { + "properties": {} + }, + "type_2": { + "_all": { + "enabled": false + }, + "properties": {} + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "_all": { + "enabled": false + }, + "properties": { + "content": { + "type": "text" + } + } + } + }, + "settings": { + "index.query.default_field": "content" + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "mytype": { + "properties": { + "title": { + "type": "text", + "boost": 2 + }, + "content": { + "type": "text" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "mytype": { + "properties": { + "first_name": { + "type": "text", + "copy_to": "full_name" + }, 
+ "last_name": { + "type": "text", + "copy_to": "full_name" + }, + "full_name": { + "type": "text" + } + } + } + } +} +emit snippet +body part: +{ + "first_name": "John", + "last_name": "Smith" +} +emit snippet +body part: +{ + "query": { + "match": { + "full_name": "John Smith" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "mytype": { + "_all": { + "store": true + } + } + } +} +emit snippet +body part: +{ + "first_name": "John", + "last_name": "Smith" +} +emit snippet +body part: +{ + "query": { + "match": { + "_all": "John Smith" + } + }, + "highlight": { + "fields": { + "_all": {} + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "mytype": { + "_all": {} + } + } +} +emit snippet +body part: +{ + "first_name": "John", + "last_name": "Smith" +} +emit snippet +body part: +{ + "query": { + "match": { + "_all": "John Smith" + } + }, + "highlight": { + "fields": { + "*_name": { + "require_field_match": false + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "title": "This is a document" +} +emit snippet +body part: +{ + "title": "This is another document", + "body": "This document has a body" +} +emit snippet +body part: +{ + "query": { + "terms": { + "_field_names": [ "title" ] + } + }, + "script_fields": { + "Field names": { + "script": "doc['_field_names']" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "text": "Document in index 1" +} +emit snippet +body part: +{ + "text": "Document in index 2" +} +emit snippet +body part: +{ + "query": { + "terms": { + "_index": ["index_1", "index_2"] + } + }, + "aggs": { + "indices": { + "terms": { + "field": "_index", + "size": 10 + } + } + }, + "sort": [ + { + "_index": { + "order": "asc" + } + } + ], + "script_fields": { + "index_name": { + "script": "doc['_index']" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "tweet": { + "_source": { + "enabled": false 
+ } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "event": { + "_source": { + "includes": [ + "*.count", + "meta.*" + ], + "excludes": [ + "meta.description", + "meta.other.*" + ] + } + } + } +} +emit snippet +body part: +{ + "requests": { + "count": 10, + "foo": "bar" + }, + "meta": { + "name": "Some metric", + "description": "Some metric description", + "other": { + "foo": "one", + "baz": "two" + } + } +} +emit snippet +body part: +{ + "query": { + "match": { + "meta.other.foo": "one" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "text": "Document with type 1" +} +emit snippet +body part: +{ + "text": "Document with type 2" +} +emit snippet +body part: +{ + "query": { + "terms": { + "_type": [ "type_1", "type_2" ] + } + }, + "aggs": { + "types": { + "terms": { + "field": "_type", + "size": 10 + } + } + }, + "sort": [ + { + "_type": { + "order": "desc" + } + } + ], + "script_fields": { + "type": { + "script": "doc['_type']" + } + } +} +handle snippet +null +emit snippet +query part: routing=user1 +body part: +{ + "title": "This is a document" +} +emit snippet +query part: routing=user1 +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "terms": { + "_routing": [ "user1" ] + } + }, + "script_fields": { + "Routing value": { + "script": "doc['_routing']" + } + } +} +handle snippet +test snippet +emit snippet +query part: routing=user1,user2 +body part: +{ + "query": { + "match": { + "title": "document" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "_routing": { + "required": true + } + } + } +} +emit snippet +catch part: request +body part: +{ + "text": "No routing value provided" +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "_ttl": { + "enabled": true + } + } + } +} +emit snippet +query part: ttl=10m +body part: +{ + "text": "Will expire in 10 minutes" +} +emit snippet 
+body part: +{ + "text": "Will not expire" +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_type": { + "_ttl": { + "enabled": true, + "default": "5m" + } + } + } +} +emit snippet +query part: ttl=10m +body part: +{ + "text": "Will expire in 10 minutes" +} +emit snippet +body part: +{ + "text": "Will expire in 5 minutes" +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_parent": {}, + "my_child": { + "_parent": { + "type": "my_parent" + } + } + } +} +emit snippet +body part: +{ + "text": "This is a parent document" +} +emit snippet +query part: parent=1 +body part: +{ + "text": "This is a child document" +} +emit snippet +query part: parent=1 +body part: +{ + "text": "This is another child document" +} +emit snippet +body part: +{ + "query": { + "has_child": { + "type": "my_child", + "query": { + "match": { + "text": "child document" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "query": { + "terms": { + "_parent": [ "1" ] + } + }, + "aggs": { + "parents": { + "terms": { + "field": "_parent", + "size": 10 + } + } + }, + "script_fields": { + "parent": { + "script": "doc['_parent']" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "my_parent": {}, + "my_child": { + "_parent": { + "type": "my_parent", + "eager_global_ordinals": true + } + } + } +} +handle snippet +test snippet +emit snippet +query part: human&fields=_parent +emit snippet +query part: human&fields=_parent +handle snippet +test snippet +emit snippet +body part: +{ + "text": "Document with ID 1" +} +emit snippet +body part: +{ + "text": "Document with ID 2" +} +emit snippet +body part: +{ + "query": { + "terms": { + "_id": [ "1", "2" ] + } + }, + "script_fields": { + "UID": { + "script": "doc['_id']" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "text": "Document with ID 1" +} +emit snippet +body part: +{ + "text": "Document with ID 2" +} 
+emit snippet +body part: +{ + "query": { + "terms": { + "_uid": [ "my_type#1", "my_type#2" ] + } + }, + "aggs": { + "UIDs": { + "terms": { + "field": "_uid", + "size": 10 + } + } + }, + "sort": [ + { + "_uid": { + "order": "desc" + } + } + ], + "script_fields": { + "UID": { + "script": "doc['_uid']" + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "user": { + "_meta": { + "class": "MyApp::User", + "version": { + "min": "1.0", + "max": "1.3" + } + } + } + } +} +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +emit snippet +emit snippet +body part: +{ + "settings": { + "index.priority": 10 + } +} +emit snippet +body part: +{ + "settings": { + "index.priority": 5 + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "index.priority": 1 +} +handle snippet +test snippet +emit snippet +emit snippet +body part: +{ + "settings": { + "index.unassigned.node_left.delayed_timeout": "5m" + } +} +handle snippet +test snippet +emit snippet +handle snippet +test snippet +emit snippet +emit snippet +body part: +{ + "settings": { + "index.unassigned.node_left.delayed_timeout": "0" + } +} +handle snippet +handle snippet +test snippet +emit snippet +emit snippet +body part: +{ + "index.routing.allocation.include.size": "big,medium" +} +handle snippet +test snippet +emit snippet +emit snippet +body part: +{ + "index.routing.allocation.exclude.size": "small" +} +handle snippet +test snippet +emit snippet +emit snippet +body part: +{ + "index.routing.allocation.include.size": "big", + "index.routing.allocation.include.rack": "rack1" +} +handle snippet +test snippet +emit snippet +body part: +{ + "index.routing.allocation.include._ip": "192.168.2.*" +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet 
+handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "tokenizer" : "standard", + "token_filter" : ["snowball"], + "text" : "detailed output", + "explain" : true, + "attributes" : ["keyword"] +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "template": "te*", + "settings": { + "number_of_shards": 1 + }, + "mappings": { + "type1": { + "_source": { + "enabled": false + }, + "properties": { + "host_name": { + "type": "keyword" + }, + "created_at": { + "type": "date", + "format": "EEE MMM dd HH:mm:ss Z YYYY" + } + } + } + } +} +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "tweet": { + "properties": { + "message": { + "type": "text" + } + } + } + } +} +emit snippet +body part: +{ + "properties": { + "name": { + "type": "text" + } + } +} +emit snippet +body part: +{ + "properties": { + "user_name": { + "type": "text" + } + } +} +handle snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "user": { + "properties": { + "name": { + "properties": { + 
"first": { + "type": "text" + } + } + }, + "user_id": { + "type": "keyword" + } + } + } + } +} +emit snippet +body part: +{ + "properties": { + "name": { + "properties": { + "last": { + "type": "text" + } + } + }, + "user_id": { + "type": "keyword", + "ignore_above": 100 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "type_one": { + "properties": { + "text": { + "type": "text", + "analyzer": "standard" + } + } + }, + "type_two": { + "properties": { + "text": { + "type": "text", + "analyzer": "standard" + } + } + } + } +} +emit snippet +catch part: request +body part: +{ + "properties": { + "text": { + "type": "text", + "analyzer": "standard", + "search_analyzer": "whitespace" + } + } +} +handle snippet +test snippet +emit snippet +query part: update_all_types +body part: +{ + "properties": { + "text": { + "type": "text", + "analyzer": "standard", + "search_analyzer": "whitespace" + } + } +} +handle snippet +test snippet +emit snippet +emit snippet +body part: +{ + "actions" : [ + { "add" : { "index" : "test1", "alias" : "alias1" } } + ] +} +handle snippet +test snippet +emit snippet +body part: +{ + "actions" : [ + { "remove" : { "index" : "test1", "alias" : "alias1" } } + ] +} +handle snippet +test snippet +emit snippet +body part: +{ + "actions" : [ + { "remove" : { "index" : "test1", "alias" : "alias1" } }, + { "add" : { "index" : "test1", "alias" : "alias2" } } + ] +} +handle snippet +test snippet +emit snippet +emit snippet +emit snippet +body part: +{ + "actions" : [ + { "add" : { "index" : "test1", "alias" : "alias1" } }, + { "add" : { "index" : "test2", "alias" : "alias1" } } + ] +} +handle snippet +test snippet +emit snippet +emit snippet +emit snippet +body part: +{ + "actions" : [ + { "add" : { "indices" : ["test1", "test2"], "alias" : "alias1" } } + ] +} +handle snippet +test snippet +emit snippet +emit snippet +emit snippet +body part: +{ + "actions" : [ + { "add" : { "index" : "test*", "alias" : 
"all_test_indices" } } + ] +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings": { + "type1": { + "properties": { + "user" : { + "type": "keyword" + } + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "actions" : [ + { + "add" : { + "index" : "test1", + "alias" : "alias2", + "filter" : { "term" : { "user" : "kimchy" } } + } + } + ] +} +handle snippet +test snippet +emit snippet +emit snippet +body part: +{ + "actions" : [ + { + "add" : { + "index" : "test", + "alias" : "alias1", + "routing" : "1" + } + } + ] +} +handle snippet +test snippet +emit snippet +emit snippet +body part: +{ + "actions" : [ + { + "add" : { + "index" : "test", + "alias" : "alias2", + "search_routing" : "1,2", + "index_routing" : "2" + } + } + ] +} +handle snippet +test snippet +emit snippet +query part: q=user:kimchy&routing=2,3 +handle snippet +test snippet +emit snippet +emit snippet +handle snippet +test snippet +emit snippet +body part: +{ + "mappings" : { + "user" : { + "properties" : { + "user_id" : {"type" : "integer"} + } + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "routing" : "12", + "filter" : { + "term" : { + "user_id" : 12 + } + } +} +handle snippet +test snippet +emit snippet +body part: +{ + "mappings" : { + "type" : { + "properties" : { + "year" : {"type" : "integer"} + } + } + }, + "aliases" : { + "current_day" : {}, + "2016" : { + "filter" : { + "term" : {"year" : 2016 } + } + } + } +} +handle snippet +test snippet +emit snippet +handle snippet +test snippet +emit snippet +handle snippet +handle snippet +test snippet +emit snippet +handle snippet +handle snippet +test snippet +emit snippet +handle snippet +handle snippet +test snippet +emit snippet +emit snippet +emit snippet +handle snippet +handle snippet +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", 
"date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +handle snippet +test snippet +emit snippet +emit snippet +emit snippet +emit snippet +handle snippet +test snippet +emit snippet +emit snippet +query part: level=shards +handle snippet +handle snippet +test snippet + + - do: + bulk: + index: twitter + type: tweet + refresh: true + body: | + {"index":{"_id": "0"}} + {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} + {"index":{"_id": "1"}} + {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} + {"index":{"_id": "2"}} + {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} + {"index":{"_id": "3"}} + {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} + {"index":{"_id": "4"}} + {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} +emit snippet +handle snippet +handle snippet +handle snippet +handle snippet +test snippet +emit snippet +emit snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle 
snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle 
snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +handle snippet +:core:compileJava UP-TO-DATE +:core:generateModulesList UP-TO-DATE +:core:generatePluginsList UP-TO-DATE +:core:processResources UP-TO-DATE +:core:classes UP-TO-DATE +:core:jar UP-TO-DATE +:test:framework:compileJava UP-TO-DATE +:test:framework:processResources UP-TO-DATE +:test:framework:classes UP-TO-DATE +:test:framework:jar UP-TO-DATE 
+:docs:compileTestJava +:docs:testClasses +:docs:integTest#prepareCluster.cleanShared +:docs:integTest#clean +:docs:integTest#checkPrevious SKIPPED +:docs:integTest#stopPrevious SKIPPED +:modules:aggs-matrix-stats:compileJava UP-TO-DATE +:modules:aggs-matrix-stats:processResources UP-TO-DATE +:modules:aggs-matrix-stats:classes UP-TO-DATE +:modules:aggs-matrix-stats:jar UP-TO-DATE +:modules:aggs-matrix-stats:copyPluginPropertiesTemplate +:modules:aggs-matrix-stats:pluginProperties UP-TO-DATE +:modules:aggs-matrix-stats:bundlePlugin UP-TO-DATE +:modules:ingest-grok:compileJava UP-TO-DATE +:modules:ingest-grok:processResources UP-TO-DATE +:modules:ingest-grok:classes UP-TO-DATE +:modules:ingest-grok:jar UP-TO-DATE +:modules:ingest-grok:copyPluginPropertiesTemplate +:modules:ingest-grok:pluginProperties UP-TO-DATE +:modules:ingest-grok:bundlePlugin UP-TO-DATE +:modules:lang-expression:compileJava UP-TO-DATE +:modules:lang-expression:processResources UP-TO-DATE +:modules:lang-expression:classes UP-TO-DATE +:modules:lang-expression:jar UP-TO-DATE +:modules:lang-expression:copyPluginPropertiesTemplate +:modules:lang-expression:pluginProperties UP-TO-DATE +:modules:lang-expression:bundlePlugin UP-TO-DATE +:modules:lang-groovy:compileJava UP-TO-DATE +:modules:lang-groovy:processResources UP-TO-DATE +:modules:lang-groovy:classes UP-TO-DATE +:modules:lang-groovy:jar UP-TO-DATE +:modules:lang-groovy:copyPluginPropertiesTemplate +:modules:lang-groovy:pluginProperties UP-TO-DATE +:modules:lang-groovy:bundlePlugin UP-TO-DATE +:modules:lang-mustache:compileJava UP-TO-DATE +:modules:lang-mustache:processResources UP-TO-DATE +:modules:lang-mustache:classes UP-TO-DATE +:modules:lang-mustache:jar UP-TO-DATE +:modules:lang-mustache:copyPluginPropertiesTemplate +:modules:lang-mustache:pluginProperties UP-TO-DATE +:modules:lang-mustache:bundlePlugin UP-TO-DATE +:modules:lang-painless:compileJava UP-TO-DATE +:modules:lang-painless:processResources UP-TO-DATE 
+:modules:lang-painless:classes UP-TO-DATE +:modules:lang-painless:jar UP-TO-DATE +:modules:lang-painless:copyPluginPropertiesTemplate +:modules:lang-painless:pluginProperties UP-TO-DATE +:modules:lang-painless:bundlePlugin UP-TO-DATE +:modules:percolator:compileJava UP-TO-DATE +:modules:percolator:processResources UP-TO-DATE +:modules:percolator:classes UP-TO-DATE +:modules:percolator:jar UP-TO-DATE +:modules:percolator:copyPluginPropertiesTemplate +:modules:percolator:pluginProperties UP-TO-DATE +:modules:percolator:bundlePlugin UP-TO-DATE +:modules:reindex:compileJava UP-TO-DATE +:modules:reindex:processResources UP-TO-DATE +:modules:reindex:classes UP-TO-DATE +:modules:reindex:jar UP-TO-DATE +:modules:reindex:copyPluginPropertiesTemplate +:modules:reindex:pluginProperties UP-TO-DATE +:modules:reindex:bundlePlugin UP-TO-DATE +:distribution:buildModules UP-TO-DATE +:distribution:zip:buildZip UP-TO-DATE +:docs:integTest#extract +:docs:integTest#configure +:docs:integTest#start +:docs:integTest#wait +:docs:integTest + [junit4] says ahoj! Master seed: E40438BBAF23DBE2 +==> Test Info: seed=E40438BBAF23DBE2; jvm=1; suite=1 +Suite: org.elasticsearch.smoketest.SmokeTestDocsIT +==> Test Summary: 1 suite (1 ignored), 0 tests + [junit4] JVM J0: 0.30 .. 1.64 = 1.35s + [junit4] Execution time total: 1.66 sec. 
+ [junit4] Tests summary: 1 suite (1 ignored), 0 tests +:docs:integTest FAILED +:docs:integTest#stop + +BUILD FAILED + +Total time: 22.834 secs + +BUILD SUCCESSFUL +Total time: 1 second diff --git a/docs/reference/query-dsl/nested-query.asciidoc b/docs/reference/query-dsl/nested-query.asciidoc index 6e990e07f91..176473174dd 100644 --- a/docs/reference/query-dsl/nested-query.asciidoc +++ b/docs/reference/query-dsl/nested-query.asciidoc @@ -23,7 +23,6 @@ PUT /my_index } } -GET _cluster/health?wait_for_status=yellow -------------------------------------------------- // CONSOLE // TESTSETUP diff --git a/docs/reference/query-dsl/parent-id-query.asciidoc b/docs/reference/query-dsl/parent-id-query.asciidoc index 8ea07a6d0b7..a7a28cf88e8 100644 --- a/docs/reference/query-dsl/parent-id-query.asciidoc +++ b/docs/reference/query-dsl/parent-id-query.asciidoc @@ -29,7 +29,6 @@ PUT /my_index } } -GET /_cluster/health?wait_for_status=yellow ------------------------------------------ // CONSOLE // TESTSETUP From 974c753bf6fdc00d30bab96d737447a0b9113a8f Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 3 Jun 2016 14:17:25 -0400 Subject: [PATCH 11/39] Fix uncaught checked exception in AzureTestUtils This commit fixes an uncaught checked IOException now thrown in AzureTestUtils after 3adaf096758a6015ca4f733e2e49ee5528ac3cd5. 
--- .../org/elasticsearch/cloud/azure/AzureTestUtils.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AzureTestUtils.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AzureTestUtils.java index 80840db587f..097f519db03 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AzureTestUtils.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/AzureTestUtils.java @@ -24,6 +24,8 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; +import java.io.IOException; + public class AzureTestUtils { /** * Read settings from file when running integration tests with ThirdParty annotation. @@ -36,7 +38,11 @@ public class AzureTestUtils { // if explicit, just load it and don't load from env try { if (Strings.hasText(System.getProperty("tests.config"))) { - settings.loadFromPath(PathUtils.get((System.getProperty("tests.config")))); + try { + settings.loadFromPath(PathUtils.get((System.getProperty("tests.config")))); + } catch (IOException e) { + throw new IllegalArgumentException("could not load azure tests config", e); + } } else { throw new IllegalStateException("to run integration tests, you need to set -Dtests.thirdparty=true and " + "-Dtests.config=/path/to/elasticsearch.yml"); From 92c6d78d4a17213ebed306f7f7646041bb0fcfe4 Mon Sep 17 00:00:00 2001 From: Stefan Scherer Date: Fri, 3 Jun 2016 21:30:18 +0200 Subject: [PATCH 12/39] Use java from path if JAVA_HOME is not set This commit adds support to the Windows scripts for finding java.exe via the path if JAVA_HOME is not set. 
Relates #18685 --- .../resources/bin/elasticsearch-plugin.bat | 21 +++++++------- .../src/main/resources/bin/elasticsearch.bat | 10 +++---- .../main/resources/bin/elasticsearch.in.bat | 15 ++++++---- .../src/main/resources/bin/service.bat | 29 +++++++++++++------ 4 files changed, 44 insertions(+), 31 deletions(-) diff --git a/distribution/src/main/resources/bin/elasticsearch-plugin.bat b/distribution/src/main/resources/bin/elasticsearch-plugin.bat index 5604b57dbef..ba35ad1c214 100644 --- a/distribution/src/main/resources/bin/elasticsearch-plugin.bat +++ b/distribution/src/main/resources/bin/elasticsearch-plugin.bat @@ -2,7 +2,15 @@ SETLOCAL enabledelayedexpansion -if NOT DEFINED JAVA_HOME goto err +IF DEFINED JAVA_HOME ( + set JAVA=%JAVA_HOME%\bin\java.exe +) ELSE ( + FOR %%I IN (java.exe) DO set JAVA=%%~$PATH:I +) +IF NOT EXIST "%JAVA%" ( + ECHO Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME 1>&2 + EXIT /B 1 +) set SCRIPT_DIR=%~dp0 for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI @@ -48,15 +56,6 @@ GOTO loop SET HOSTNAME=%COMPUTERNAME% -"%JAVA_HOME%\bin\java" %ES_JAVA_OPTS% -Des.path.home="%ES_HOME%" !properties! -cp "%ES_HOME%/lib/*;" "org.elasticsearch.plugins.PluginCli" !args! -goto finally - - -:err -echo JAVA_HOME environment variable must be set! -pause - - -:finally +"%JAVA%" %ES_JAVA_OPTS% -Des.path.home="%ES_HOME%" !properties! -cp "%ES_HOME%/lib/*;" "org.elasticsearch.plugins.PluginCli" !args! ENDLOCAL diff --git a/distribution/src/main/resources/bin/elasticsearch.bat b/distribution/src/main/resources/bin/elasticsearch.bat index 9f1d871d0a2..37d7fc026a8 100644 --- a/distribution/src/main/resources/bin/elasticsearch.bat +++ b/distribution/src/main/resources/bin/elasticsearch.bat @@ -35,14 +35,14 @@ FOR /F "usebackq tokens=1* delims= " %%A IN (!params!) DO ( SET current=%%A SET params='%%B' SET silent=N - + IF "!current!" == "-s" ( SET silent=Y ) IF "!current!" 
== "--silent" ( SET silent=Y - ) - + ) + IF "!silent!" == "Y" ( SET nopauseonerror=Y ) ELSE ( @@ -52,7 +52,7 @@ FOR /F "usebackq tokens=1* delims= " %%A IN (!params!) DO ( SET newparams=!current! ) ) - + IF "x!params!" NEQ "x" ( GOTO loop ) @@ -79,6 +79,6 @@ IF ERRORLEVEL 1 ( EXIT /B %ERRORLEVEL% ) -"%JAVA_HOME%\bin\java" %ES_JAVA_OPTS% %ES_PARAMS% -cp "%ES_CLASSPATH%" "org.elasticsearch.bootstrap.Elasticsearch" !newparams! +"%JAVA%" %ES_JAVA_OPTS% %ES_PARAMS% -cp "%ES_CLASSPATH%" "org.elasticsearch.bootstrap.Elasticsearch" !newparams! ENDLOCAL diff --git a/distribution/src/main/resources/bin/elasticsearch.in.bat b/distribution/src/main/resources/bin/elasticsearch.in.bat index d6f1ef308bd..2d73ed2b57b 100644 --- a/distribution/src/main/resources/bin/elasticsearch.in.bat +++ b/distribution/src/main/resources/bin/elasticsearch.in.bat @@ -1,12 +1,15 @@ @echo off -if DEFINED JAVA_HOME goto cont +IF DEFINED JAVA_HOME ( + set JAVA=%JAVA_HOME%\bin\java.exe +) ELSE ( + FOR %%I IN (java.exe) DO set JAVA=%%~$PATH:I +) +IF NOT EXIST "%JAVA%" ( + ECHO Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME 1>&2 + EXIT /B 1 +) -:err -ECHO JAVA_HOME environment variable must be set! 1>&2 -EXIT /B 1 - -:cont set SCRIPT_DIR=%~dp0 for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI diff --git a/distribution/src/main/resources/bin/service.bat b/distribution/src/main/resources/bin/service.bat index 81b6c8a5df5..344782030ee 100644 --- a/distribution/src/main/resources/bin/service.bat +++ b/distribution/src/main/resources/bin/service.bat @@ -28,27 +28,38 @@ if %bad_env_var% == 1 ( ) rem end TODO: remove for Elasticsearch 6.x -if NOT DEFINED JAVA_HOME goto err +IF DEFINED JAVA_HOME ( + SET JAVA=%JAVA_HOME%\bin\java.exe +) ELSE ( + FOR %%I IN (java.exe) DO set JAVA=%%~$PATH:I +) +IF NOT EXIST "%JAVA%" ( + ECHO Could not find any executable java binary. 
Please install java in your PATH or set JAVA_HOME 1>&2 + EXIT /B 1 +) +IF DEFINED JAVA_HOME GOTO :cont +IF NOT "%JAVA:~-13%" == "\bin\java.exe" ( + FOR /f "tokens=2 delims=[]" %%I IN ('dir %JAVA%') DO @set JAVA=%%I +) +IF "%JAVA:~-13%" == "\bin\java.exe" ( + SET JAVA_HOME=%JAVA:~0,-13% +) + +:cont if not "%CONF_FILE%" == "" goto conffileset set SCRIPT_DIR=%~dp0 for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI -rem Detect JVM version to figure out appropriate executable to use -if not exist "%JAVA_HOME%\bin\java.exe" ( -echo JAVA_HOME points to an invalid Java installation (no java.exe found in "%JAVA_HOME%"^). Exiting... -goto:eof -) - -"%JAVA_HOME%\bin\java" -Xmx50M -version > nul 2>&1 +"%JAVA%" -Xmx50M -version > nul 2>&1 if errorlevel 1 ( echo Warning: Could not start JVM to detect version, defaulting to x86: goto x86 ) -"%JAVA_HOME%\bin\java" -Xmx50M -version 2>&1 | "%windir%\System32\find" "64-Bit" >nul: +"%JAVA%" -Xmx50M -version 2>&1 | "%windir%\System32\find" "64-Bit" >nul: if errorlevel 1 goto x86 set EXECUTABLE=%ES_HOME%\bin\elasticsearch-service-x64.exe From be0036542c4d1c40c0044cd22f45ea1d2534b8ec Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 3 Jun 2016 16:18:46 -0400 Subject: [PATCH 13/39] More complete exception message in settings tests This commit adds additional details to the exception message assertions in the YAML settings loader tests. 
--- .../loader/YamlSettingsLoaderTests.java | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java index 618209cf114..7c956de8f9a 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java @@ -19,17 +19,16 @@ package org.elasticsearch.common.settings.loader; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Collections; - import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.test.ESTestCase; -import static org.hamcrest.Matchers.containsString; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; + import static org.hamcrest.Matchers.equalTo; public class YamlSettingsLoaderTests extends ESTestCase { @@ -75,8 +74,10 @@ public class YamlSettingsLoaderTests extends ESTestCase { }); assertEquals(e.getCause().getClass(), ElasticsearchParseException.class); String msg = e.getCause().getMessage(); - assertTrue(msg, msg.contains("duplicate settings key [foo] found")); - assertTrue(msg, msg.contains("previous value [bar], current value [baz]")); + assertTrue( + msg, + msg.contains("duplicate settings key [foo] found at line number [2], column number [6], " + + "previous value [bar], current value [baz]")); } public void testMissingValue() throws Exception { @@ -85,6 +86,8 @@ public class YamlSettingsLoaderTests extends ESTestCase { ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> { Settings.builder().loadFromPath(tmp); 
}); - assertTrue(e.getMessage(), e.getMessage().contains("null-valued setting found for key [foo] found at line")); + assertTrue( + e.getMessage(), + e.getMessage().contains("null-valued setting found for key [foo] found at line number [1], column number [5]")); } } From c0a3a200712607ca07a738541c6b6b2e32609111 Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Fri, 3 Jun 2016 23:31:06 +0200 Subject: [PATCH 14/39] painless: Add support for the new Java 9 MethodHandles#arrayLength() factory (see https://bugs.openjdk.java.net/browse/JDK-8156915) --- .../java/org/elasticsearch/painless/Def.java | 32 +++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java index bd8e09f504b..f27f0ab20a9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java @@ -53,7 +53,7 @@ public final class Def { /** Helper class for isolating MethodHandles and methods to get the length of arrays * (to emulate a "arraystore" bytecode using MethodHandles). - * This should really be a method in {@link MethodHandles} class! 
+ * See: https://bugs.openjdk.java.net/browse/JDK-8156915 */ @SuppressWarnings("unused") // getArrayLength() methods are are actually used, javac just does not know :) private static final class ArrayLengthHelper { @@ -103,6 +103,8 @@ public final class Def { private static final MethodHandle LIST_GET; /** pointer to List.set(int,Object) */ private static final MethodHandle LIST_SET; + /** factory for arraylength MethodHandle (intrinsic) from Java 9 */ + private static final MethodHandle JAVA9_ARRAY_LENGTH_MH_FACTORY; static { final Lookup lookup = MethodHandles.publicLookup(); @@ -115,11 +117,37 @@ public final class Def { } catch (final ReflectiveOperationException roe) { throw new AssertionError(roe); } + + // lookup up the factory for arraylength MethodHandle (intrinsic) from Java 9: + // https://bugs.openjdk.java.net/browse/JDK-8156915 + MethodHandle arrayLengthMHFactory; + try { + arrayLengthMHFactory = lookup.findStatic(MethodHandles.class, "arrayLength", + MethodType.methodType(MethodHandle.class, Class.class)); + } catch (final ReflectiveOperationException roe) { + arrayLengthMHFactory = null; + } + JAVA9_ARRAY_LENGTH_MH_FACTORY = arrayLengthMHFactory; } + /** Hack to rethrow unknown Exceptions from {@link MethodHandle#invokeExact}: */ + @SuppressWarnings("unchecked") + private static void rethrow(Throwable t) throws T { + throw (T) t; + } + /** Returns an array length getter MethodHandle for the given array type */ static MethodHandle arrayLengthGetter(Class arrayType) { - return ArrayLengthHelper.arrayLengthGetter(arrayType); + if (JAVA9_ARRAY_LENGTH_MH_FACTORY != null) { + try { + return (MethodHandle) JAVA9_ARRAY_LENGTH_MH_FACTORY.invokeExact(arrayType); + } catch (Throwable t) { + rethrow(t); + throw new AssertionError(t); + } + } else { + return ArrayLengthHelper.arrayLengthGetter(arrayType); + } } /** From b79fd4c360789c8ff917185db305f11f1ebbdd81 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 3 Jun 2016 17:56:03 -0400 Subject: [PATCH 15/39] 
Remove log file Looks like accidentally committed. --- docs/out | 11165 ----------------------------------------------------- 1 file changed, 11165 deletions(-) delete mode 100644 docs/out diff --git a/docs/out b/docs/out deleted file mode 100644 index 5a0af906e79..00000000000 --- a/docs/out +++ /dev/null @@ -1,11165 +0,0 @@ -:buildSrc:compileJava UP-TO-DATE -:buildSrc:compileGroovy -:buildSrc:writeVersionProperties UP-TO-DATE -:buildSrc:processResources UP-TO-DATE -:buildSrc:classes -:buildSrc:jar -:buildSrc:assemble -:buildSrc:compileTestJava UP-TO-DATE -:buildSrc:compileTestGroovy UP-TO-DATE -:buildSrc:processTestResources UP-TO-DATE -:buildSrc:testClasses UP-TO-DATE -:buildSrc:test UP-TO-DATE -:buildSrc:check UP-TO-DATE -:buildSrc:build -======================================= -Elasticsearch Build Hamster says Hello! -======================================= - Gradle Version : 2.13 - OS Info : Linux 3.13.0-39-generic (amd64) - JDK Version : Oracle Corporation 1.8.0_65 [Java HotSpot(TM) 64-Bit Server VM 25.65-b01] - JAVA_HOME : /usr/java/jdk1.8.0_65 -:docs:clean -:docs:processTestResources UP-TO-DATE -:rest-api-spec:compileJava UP-TO-DATE -:rest-api-spec:processResources UP-TO-DATE -:rest-api-spec:classes UP-TO-DATE -:rest-api-spec:jar UP-TO-DATE -:docs:copyRestSpec -:docs:buildRestTests -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet 
-handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet 
-handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "title": { - "type": "text", - "analyzer": 
"standard" - } - } - } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "my_number": { - "type": "long", - "fields": { - "keyword": { - "type": "keyword" - } - } - } - } - } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "analyzer": "whitespace", - "text": "The quick brown fox." -} -emit snippet -body part: -{ - "tokenizer": "standard", - "filter": [ "lowercase", "asciifolding" ], - "text": "Is this déja vu?" -} -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "std_folded": { - "type": "custom", - "tokenizer": "standard", - "filter": [ - "lowercase", - "asciifolding" - ] - } - } - } - }, - "mappings": { - "my_type": { - "properties": { - "my_text": { - "type": "text", - "analyzer": "std_folded" - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "std_folded", - "text": "Is this déjà vu?" -} -emit snippet -body part: -{ - "field": "my_text", - "text": "Is this déjà vu?" -} -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "classic", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." 
-} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "my_tokenizer" - } - }, - "tokenizer": { - "my_tokenizer": { - "type": "classic", - "max_token_length": 5 - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "whitespace", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "ngram", - "text": "Quick Fox" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "my_tokenizer" - } - }, - "tokenizer": { - "my_tokenizer": { - "type": "ngram", - "min_gram": 3, - "max_gram": 3, - "token_chars": [ - "letter", - "digit" - ] - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "2 Quick Foxes." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "letter", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "pattern", - "text": "The foo_bar_size's default is 5." 
-} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "my_tokenizer" - } - }, - "tokenizer": { - "my_tokenizer": { - "type": "pattern", - "pattern": "," - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "comma,separated,values" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "my_tokenizer" - } - }, - "tokenizer": { - "my_tokenizer": { - "type": "pattern", - "pattern": "\"((?:\\\\\"|[^\"]|\\\\\")+)\"", - "group": 1 - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "\"value\", \"value with embedded \\\" quote\"" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "edge_ngram", - "text": "Quick Fox" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "my_tokenizer" - } - }, - "tokenizer": { - "my_tokenizer": { - "type": "edge_ngram", - "min_gram": 2, - "max_gram": 10, - "token_chars": [ - "letter", - "digit" - ] - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "2 Quick Foxes." 
-} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "autocomplete": { - "tokenizer": "autocomplete", - "filter": [ - "lowercase" - ] - }, - "autocomplete_search": { - "tokenizer": "lowercase" - } - }, - "tokenizer": { - "autocomplete": { - "type": "edge_ngram", - "min_gram": 2, - "max_gram": 10, - "token_chars": [ - "letter" - ] - } - } - } - }, - "mappings": { - "doc": { - "properties": { - "title": { - "type": "text", - "analyzer": "autocomplete", - "search_analyzer": "autocomplete_search" - } - } - } - } -} -emit snippet -body part: -{ - "title": "Quick Foxes" -} -emit snippet -emit snippet -body part: -{ - "query": { - "match": { - "title": { - "query": "Quick Fo", - "operator": "and" - } - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "uax_url_email", - "text": "Email me at john.smith@global-international.com" -} -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "my_tokenizer" - } - }, - "tokenizer": { - "my_tokenizer": { - "type": "uax_url_email", - "max_token_length": 5 - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "john.smith@global-international.com" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "path_hierarchy", - "text": "/one/two/three" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "my_tokenizer" - } - }, - "tokenizer": { - "my_tokenizer": { - "type": "path_hierarchy", - "delimiter": "-", - "replacement": "/", - "skip": 2 - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit 
snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "one-two-three-four-five" -} -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "standard", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "my_tokenizer" - } - }, - "tokenizer": { - "my_tokenizer": { - "type": "standard", - "max_token_length": 5 - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "keyword", - "text": "New York" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "thai", - "text": "การที่ได้ต้องแสดงว่างานดี" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "lowercase", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "analyzer": "standard", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." 
-} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_english_analyzer": { - "type": "standard", - "max_token_length": 5, - "stopwords": "_english_" - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_english_analyzer", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "analyzer": "simple", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "analyzer": "whitespace", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "analyzer": "keyword", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "std_english": { - "type": "standard", - "stopwords": "_english_" - } - } - } - }, - "mappings": { - "my_type": { - "properties": { - "my_text": { - "type": "text", - "analyzer": "standard", - "fields": { - "english": { - "type": "text", - "analyzer": "std_english" - } - } - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "field": "my_text", - "text": "The old brown cow" -} -emit snippet -body part: -{ - "field": "my_text.english", - "text": "The old brown cow" -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "analyzer": "stop", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." 
-} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_stop_analyzer": { - "type": "stop", - "stopwords": ["the", "over"] - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_stop_analyzer", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_custom_analyzer": { - "type": "custom", - "tokenizer": "standard", - "char_filter": [ - "html_strip" - ], - "filter": [ - "lowercase", - "asciifolding" - ] - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_custom_analyzer", - "text": "Is this déjà vu?" 
-} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_custom_analyzer": { - "type": "custom", - "char_filter": [ - "emoticons" - ], - "tokenizer": "punctuation", - "filter": [ - "lowercase", - "english_stop" - ] - } - }, - "tokenizer": { - "punctuation": { - "type": "pattern", - "pattern": "[ .,!?]" - } - }, - "char_filter": { - "emoticons": { - "type": "mapping", - "mappings": [ - ":) => _happy_", - ":( => _sad_" - ] - } - }, - "filter": { - "english_stop": { - "type": "stop", - "stopwords": "_english_" - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_custom_analyzer", - "text": "I'm a :) person, and you?" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "analyzer": "pattern", - "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_email_analyzer": { - "type": "pattern", - "pattern": "\\W|_", - "lowercase": true - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_email_analyzer", - "text": "John_Smith@foo-bar.com" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "camel": { - "type": "pattern", - "pattern": "([^\\p{L}\\d]+)|(?<=\\D)(?=\\d)|(?<=\\d)(?=\\D)|(?<=[\\p{L}&&[^\\p{Lu}]])(?=\\p{Lu})|(?<=\\p{Lu})(?=\\p{Lu}[\\p{L}&&[^\\p{Lu}]])" - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "camel", - "text": "MooseX::FTPClass2_beta" -} -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "analyzer": "fingerprint", - "text": "Yes yes, 
Gödel said this sentence is consistent and." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_fingerprint_analyzer": { - "type": "fingerprint", - "stopwords": "_english_" - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_fingerprint_analyzer", - "text": "Yes yes, Gödel said this sentence is consistent and." -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer": "keyword", - "char_filter": [ "html_strip" ], - "text": "

I'm so happy!

" -} -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "keyword", - "char_filter": ["my_char_filter"] - } - }, - "char_filter": { - "my_char_filter": { - "type": "html_strip", - "escaped_tags": ["b"] - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "

I'm so happy!

" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "keyword", - "char_filter": [ - "my_char_filter" - ] - } - }, - "char_filter": { - "my_char_filter": { - "type": "mapping", - "mappings": [ - "٠ => 0", - "١ => 1", - "٢ => 2", - "٣ => 3", - "٤ => 4", - "٥ => 5", - "٦ => 6", - "٧ => 7", - "٨ => 8", - "٩ => 9" - ] - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "My license plate is ٢٥٠١٥" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "standard", - "char_filter": [ - "my_char_filter" - ] - } - }, - "char_filter": { - "my_char_filter": { - "type": "mapping", - "mappings": [ - ":) => _happy_", - ":( => _sad_" - ] - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "I'm delighted about it :(" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "standard", - "char_filter": [ - "my_char_filter" - ] - } - }, - "char_filter": { - "my_char_filter": { - "type": "pattern_replace", - "pattern": "(\\d+)-(?=\\d)", - "replacement": "$1_" - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "My credit card is 123-456-789" -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "analyzer": { - "my_analyzer": { - "tokenizer": "standard", - "char_filter": [ - "my_char_filter" - ], - "filter": [ - "lowercase" - ] - } - }, - "char_filter": { - "my_char_filter": { - "type": "pattern_replace", - "pattern": 
"(?<=\\p{Lower})(?=\\p{Upper})", - "replacement": " " - } - } - } - }, - "mappings": { - "my_type": { - "properties": { - "text": { - "type": "text", - "analyzer": "my_analyzer" - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "analyzer": "my_analyzer", - "text": "The fooBarBaz method" -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -query part: refresh -body part: -{ - "text": "The fooBarBaz method" -} -emit snippet -body part: -{ - "query": { - "match": { - "text": "bar" - } - }, - "highlight": { - "fields": { - "text": {} - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match_all": {} - }, - "fielddata_fields" : ["test1", "test2"] -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "from" : 0, "size" : 10, - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "_source": false, - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "_source": "obj.*", - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "_source": [ "obj1.*", "obj2.*" ], - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "_source": { - "include": [ "obj1.*", "obj2.*" ], - "exclude": [ "*.description" ] - }, - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "explain": true, - "query" 
: { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match_all": {} - }, - "script_fields" : { - "test1" : { - "script" : "doc['my_field_name'].value * 2" - }, - "test2" : { - "script" : { - "inline": "doc['my_field_name'].value * factor", - "params" : { - "factor" : 2.0 - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: - { - "query" : { - "match_all": {} - }, - "script_fields" : { - "test1" : { - "script" : "_source.obj1.obj2" - } - } - } -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match": { "user": "kimchy" } - }, - "highlight" : { - "fields" : { - "content" : {} - } - } -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match": { "user": "kimchy" } - }, - "highlight" : { - "fields" : { - "content" : {"type" : "plain"} - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match": { "user": "kimchy" } - }, - "highlight" : { - "fields" : { - "content" : {"force_source" : true} - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match": { "user": "kimchy" } - }, - "highlight" : { - "pre_tags" : [""], - "post_tags" : [""], - "fields" : { - "_all" : {} - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match": { "user": "kimchy" } - }, - "highlight" : { - "pre_tags" : ["", ""], - "post_tags" : ["", ""], - "fields" : { - "_all" : {} - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match": { "user": "kimchy" } - }, - "highlight" : { - "tags_schema" : "styled", - "fields" : { - "content" : {} - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match": { "user": "kimchy" } - }, - "highlight" : { - "fields" : { - "content" : {"fragment_size" : 150, "number_of_fragments" : 3} - } - } -} -handle snippet 
-test snippet -emit snippet -body part: -{ - "query" : { - "match": { "user": "kimchy" } - }, - "highlight" : { - "order" : "score", - "fields" : { - "content" : {"fragment_size" : 150, "number_of_fragments" : 3} - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match": { "user": "kimchy" } - }, - "highlight" : { - "fields" : { - "_all" : {}, - "bio.title" : {"number_of_fragments" : 0} - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match": { "user": "kimchy" } - }, - "highlight" : { - "fields" : { - "content" : { - "fragment_size" : 150, - "number_of_fragments" : 3, - "no_match_size": 150 - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "fields": [ "_id" ], - "query" : { - "match": { - "content": { - "query": "foo bar" - } - } - }, - "rescore": { - "window_size": 50, - "query": { - "rescore_query" : { - "match_phrase": { - "content": { - "query": "foo bar", - "phrase_slop": 1 - } - } - }, - "rescore_query_weight" : 10 - } - }, - "highlight" : { - "order" : "score", - "fields" : { - "content" : { - "fragment_size" : 150, - "number_of_fragments" : 3, - "highlight_query": { - "bool": { - "must": { - "match": { - "content": { - "query": "foo bar" - } - } - }, - "should": { - "match_phrase": { - "content": { - "query": "foo bar", - "phrase_slop": 1, - "boost": 10.0 - } - } - }, - "minimum_should_match": 0 - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match": { "user": "kimchy" } - }, - "highlight" : { - "number_of_fragments" : 3, - "fragment_size" : 150, - "fields" : { - "_all" : { "pre_tags" : [""], "post_tags" : [""] }, - "bio.title" : { "number_of_fragments" : 0 }, - "bio.author" : { "number_of_fragments" : 0 }, - "bio.content" : { "number_of_fragments" : 5, "order" : "score" } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match": { "user": "kimchy" } - }, - "highlight" : 
{ - "require_field_match": false, - "fields": { - "_all" : { "pre_tags" : [""], "post_tags" : [""] } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "query_string": { - "query": "content.plain:running scissors", - "fields": ["content"] - } - }, - "highlight": { - "order": "score", - "fields": { - "content": { - "matched_fields": ["content", "content.plain"], - "type" : "fvh" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "query_string": { - "query": "running scissors", - "fields": ["content", "content.plain^10"] - } - }, - "highlight": { - "order": "score", - "fields": { - "content": { - "matched_fields": ["content", "content.plain"], - "type" : "fvh" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "query_string": { - "query": "running scissors", - "fields": ["content", "content.plain^10"] - } - }, - "highlight": { - "order": "score", - "fields": { - "content": { - "matched_fields": ["content.plain"], - "type" : "fvh" - } - } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "min_score": 0.5, - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -query part: preference=xyzabc123 -body part: -{ - "query": { - "match": { - "title": "elasticsearch" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "should" : [ - {"match" : { "name.first" : {"query" : "shay", "_name" : "first"} }}, - {"match" : { "name.last" : {"query" : "banon", "_name" : "last"} }} - ], - "filter" : { - "terms" : { - "name.last" : ["banon", "kimchy"], - "_name" : "test" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "indices_boost" : { - "index1" : 1.4, - "index2" : 1.3 - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - 
"post_date": { "type": "date" }, - "user": { - "type": "keyword" - }, - "name": { - "type": "keyword" - }, - "age": { "type": "integer" } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "sort" : [ - { "post_date" : {"order" : "asc"}}, - "user", - { "name" : "desc" }, - { "age" : "desc" }, - "_score" - ], - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -query part: refresh -body part: -{ - "product": "chocolate", - "price": [20, 4] -} -emit snippet -body part: -{ - "query" : { - "term" : { "product" : "chocolate" } - }, - "sort" : [ - {"price" : {"order" : "asc", "mode" : "avg"}} - ] -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "term" : { "product" : "chocolate" } - }, - "sort" : [ - { - "offer.price" : { - "mode" : "avg", - "order" : "asc", - "nested_path" : "offer", - "nested_filter" : { - "term" : { "offer.color" : "blue" } - } - } - } - ] -} -handle snippet -test snippet -emit snippet -body part: -{ - "sort" : [ - { "price" : {"missing" : "_last"} } - ], - "query" : { - "term" : { "product" : "chocolate" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "sort" : [ - { "price" : {"unmapped_type" : "long"} } - ], - "query" : { - "term" : { "product" : "chocolate" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "sort" : [ - { - "_geo_distance" : { - "pin.location" : [-70, 40], - "order" : "asc", - "unit" : "km", - "mode" : "min", - "distance_type" : "sloppy_arc" - } - } - ], - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "sort" : [ - { - "_geo_distance" : { - "pin.location" : { - "lat" : 40, - "lon" : -70 - }, - "order" : "asc", - "unit" : "km" - } - } - ], - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "sort" : [ - { - "_geo_distance" : { - "pin.location" : "40,-70", - "order" : 
"asc", - "unit" : "km" - } - } - ], - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "sort" : [ - { - "_geo_distance" : { - "pin.location" : "drm3btev3e86", - "order" : "asc", - "unit" : "km" - } - } - ], - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "sort" : [ - { - "_geo_distance" : { - "pin.location" : [-70, 40], - "order" : "asc", - "unit" : "km" - } - } - ], - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "term" : { "user" : "kimchy" } - }, - "sort" : { - "_script" : { - "type" : "number", - "script" : { - "inline": "doc['field_name'].value * factor", - "params" : { - "factor" : 1.1 - } - }, - "order" : "asc" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "track_scores": true, - "sort" : [ - { "post_date" : {"order" : "desc"} }, - { "name" : "desc" }, - { "age" : "desc" } - ], - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "version": true, - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "fields" : ["user", "postDate"], - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "fields" : [], - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -null -emit snippet -body part: -{ - "mappings": { - "item": { - "properties": { - "brand": { "type": "keyword"}, - "color": { "type": "keyword"}, - "model": { "type": "keyword"} - } - } - } -} -emit snippet -query part: refresh -body part: -{ - "brand": "gucci", - "color": "red", - "model": "slim" -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "filter": [ - { "term": { "color": "red" }}, - { 
"term": { "brand": "gucci" }} - ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "filter": [ - { "term": { "color": "red" }}, - { "term": { "brand": "gucci" }} - ] - } - }, - "aggs": { - "models": { - "terms": { "field": "model" } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "filter": { - "term": { "brand": "gucci" } - } - } - }, - "aggs": { - "colors": { - "terms": { "field": "color" } - }, - "color_red": { - "filter": { - "term": { "color": "red" } - }, - "aggs": { - "models": { - "terms": { "field": "model" } - } - } - } - }, - "post_filter": { - "term": { "color": "red" } - } -} -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -body part: -{ - "size": 10, - "query": { - "match" : { - "title" : "elasticsearch" - } - }, - "sort": [ - {"date": "asc"}, - {"_uid": "desc"} - ] -} -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - 
{"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -body part: -{ - "size": 10, - "query": { - "match" : { - "title" : "elasticsearch" - } - }, - "search_after": [1463538857, "tweet#654323"], - "sort": [ - {"date": "asc"}, - {"_uid": "desc"} - ] -} -handle snippet -null -emit snippet -query part: refresh -body part: -{"index":{"_id":1}} -{"user" : "kimchy", "post_date" : "2009-11-15T14:12:12", "message" : "trying out Elasticsearch"} -{"index":{"_id":2}} -{"user" : "kimchi", "post_date" : "2009-11-15T14:12:13", "message" : "My username is similar to @kimchy!"} -handle snippet -test snippet -emit snippet -query part: q=user:foo -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "bool" : { - "must" : { - "query_string" : { - "query" : "*:*" - } - }, - "filter" : { - "term" : { "user" : "kimchy" } - } - } - } -} -handle snippet -test snippet -emit snippet -query part: q=post_date:foo -handle snippet -handle snippet -test snippet -emit snippet -query part: q=post_date:foo&explain=true -handle snippet -handle snippet -test snippet -emit snippet -query part: rewrite=true -body part: -{ - "query": { - "match": { - "user": { - "query": "kimchy", - "fuzziness": "auto" - } - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -query part: rewrite=true -body part: -{ - "query": { - "more_like_this": { - "like": { - "_id": "2" - }, - "boost_terms": 1 - } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet 
-handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", 
"likes": 4} -emit snippet -query part: q=user:kimchy -handle snippet -handle snippet -test snippet -emit snippet -query part: refresh -body part: -{ - "user": "kimchy" -} -emit snippet -query part: q=user:kimchy -emit snippet -body part: -{ - "query" : { - "term" : { "user" : "kimchy" } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "multi_match" : { - "query": "this is a test", - "fields": [ "subject", "message" ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "multi_match" : { - "query": "Will Smith", - "fields": [ "title", "*_name" ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "multi_match" : { - "query" : "this is a test", - "fields" : [ "subject^3", "message" ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - 
"multi_match" : { - "query": "brown fox", - "type": "best_fields", - "fields": [ "subject", "message" ], - "tie_breaker": 0.3 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "dis_max": { - "queries": [ - { "match": { "subject": "brown fox" }}, - { "match": { "message": "brown fox" }} - ], - "tie_breaker": 0.3 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "multi_match" : { - "query": "Will Smith", - "type": "best_fields", - "fields": [ "first_name", "last_name" ], - "operator": "and" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "multi_match" : { - "query": "quick brown fox", - "type": "most_fields", - "fields": [ "title", "title.original", "title.shingles" ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "should": [ - { "match": { "title": "quick brown fox" }}, - { "match": { "title.original": "quick brown fox" }}, - { "match": { "title.shingles": "quick brown fox" }} - ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "multi_match" : { - "query": "quick brown f", - "type": "phrase_prefix", - "fields": [ "subject", "message" ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "dis_max": { - "queries": [ - { "match_phrase_prefix": { "subject": "quick brown f" }}, - { "match_phrase_prefix": { "message": "quick brown f" }} - ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "multi_match" : { - "query": "Will Smith", - "type": "cross_fields", - "fields": [ "first_name", "last_name" ], - "operator": "and" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "multi_match" : { - "query": "Jon", - "type": "cross_fields", - "fields": [ - "first", "first.edge", - "last", "last.edge" - ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - 
"bool": { - "should": [ - { - "multi_match" : { - "query": "Will Smith", - "type": "cross_fields", - "fields": [ "first", "last" ], - "minimum_should_match": "50%" - } - }, - { - "multi_match" : { - "query": "Will Smith", - "type": "cross_fields", - "fields": [ "*.edge" ] - } - } - ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "multi_match" : { - "query": "Jon", - "type": "cross_fields", - "analyzer": "standard", - "fields": [ "first", "last", "*.edge" ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "query_string" : { - "default_field" : "content", - "query" : "this AND that OR thus" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "query_string" : { - "fields" : ["content", "name"], - "query" : "this AND that" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "query_string": { - "query": "(content:this OR name:this) AND (content:that OR name:that)" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "query_string" : { - "fields" : ["content", "name^5"], - "query" : "this AND that OR thus", - "use_dis_max" : true - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "query_string" : { - "fields" : ["city.*"], - "query" : "this AND that OR thus", - "use_dis_max" : true - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "query_string" : { - "fields" : ["content", "name.*^5"], - "query" : "this AND that OR thus", - "use_dis_max" : true - } - } -} -handle snippet -null -emit snippet -body part: -{ - "mappings": { - "location": { - "properties": { - "pin": { - "properties": { - "location": { - "type": "geo_point" - } - } - } - } - } - } -} -emit snippet -body part: -{ - "pin" : { - "location" : { - "lat" : 40.12, - "lon" : -71.34 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - 
"must" : { - "match_all" : {} - }, - "filter" : { - "geo_distance" : { - "distance" : "200km", - "pin.location" : { - "lat" : 40, - "lon" : -70 - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_distance" : { - "distance" : "12km", - "pin.location" : { - "lat" : 40, - "lon" : -70 - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_distance" : { - "distance" : "12km", - "pin.location" : [-70, 40] - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_distance" : { - "distance" : "12km", - "pin.location" : "40,-70" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_distance" : { - "distance" : "12km", - "pin.location" : "drm3btev3e86" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match_all": {} - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match_all": { "boost" : 1.2 } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match_none": {} - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "more_like_this" : { - "fields" : ["title", "description"], - "like" : "Once upon a time", - "min_term_freq" : 1, - "max_query_terms" : 12 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "more_like_this" : { - "fields" : ["title", "description"], - "like" : [ - { - "_index" : "imdb", - "_type" : "movies", - "_id" : "1" - }, - { - "_index" : "imdb", - "_type" : "movies", - "_id" : "2" - }, - "and potentially some more text here as well" - ], - "min_term_freq" : 1, - 
"max_query_terms" : 12 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "more_like_this" : { - "fields" : ["name.first", "name.last"], - "like" : [ - { - "_index" : "marvel", - "_type" : "quotes", - "doc" : { - "name": { - "first": "Ben", - "last": "Grimm" - }, - "tweet": "You got no idea what I'd... what I'd give to be invisible." - } - }, - { - "_index" : "marvel", - "_type" : "quotes", - "_id" : "2" - } - ], - "min_term_freq" : 1, - "max_query_terms" : 12 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "movies": { - "properties": { - "title": { - "type": "text", - "term_vector": "yes" - }, - "description": { - "type": "text" - }, - "tags": { - "type": "text", - "fields" : { - "raw": { - "type" : "text", - "analyzer": "keyword", - "term_vector" : "yes" - } - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "indices" : { - "indices" : ["index1", "index2"], - "query" : { "term" : { "tag" : "wow" } }, - "no_match_query" : { "term" : { "tag" : "kow" } } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "range" : { - "age" : { - "gte" : 10, - "lte" : 20, - "boost" : 2.0 - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "range" : { - "date" : { - "gte" : "now-1d/d", - "lt" : "now/d" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "range" : { - "born" : { - "gte": "01/01/2012", - "lte": "2013", - "format": "dd/MM/yyyy||yyyy" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "range" : { - "timestamp" : { - "gte": "2015-01-01 00:00:00", - "lte": "now", - "time_zone": "+01:00" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "common": { - "body": { - "query": "this is bonsai cool", - "cutoff_frequency": 0.001 - } - } - } -} -handle snippet -test snippet -emit 
snippet -body part: -{ - "query": { - "common": { - "body": { - "query": "nelly the elephant as a cartoon", - "cutoff_frequency": 0.001, - "low_freq_operator": "and" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "must": [ - { "term": { "body": "nelly"}}, - { "term": { "body": "elephant"}}, - { "term": { "body": "cartoon"}} - ], - "should": [ - { "term": { "body": "the"}}, - { "term": { "body": "as"}}, - { "term": { "body": "a"}} - ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "common": { - "body": { - "query": "nelly the elephant as a cartoon", - "cutoff_frequency": 0.001, - "minimum_should_match": 2 - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "must": { - "bool": { - "should": [ - { "term": { "body": "nelly"}}, - { "term": { "body": "elephant"}}, - { "term": { "body": "cartoon"}} - ], - "minimum_should_match": 2 - } - }, - "should": [ - { "term": { "body": "the"}}, - { "term": { "body": "as"}}, - { "term": { "body": "a"}} - ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "common": { - "body": { - "query": "nelly the elephant not as a cartoon", - "cutoff_frequency": 0.001, - "minimum_should_match": { - "low_freq" : 2, - "high_freq" : 3 - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "must": { - "bool": { - "should": [ - { "term": { "body": "nelly"}}, - { "term": { "body": "elephant"}}, - { "term": { "body": "cartoon"}} - ], - "minimum_should_match": 2 - } - }, - "should": { - "bool": { - "should": [ - { "term": { "body": "the"}}, - { "term": { "body": "not"}}, - { "term": { "body": "as"}}, - { "term": { "body": "a"}} - ], - "minimum_should_match": 3 - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "common": { - "body": { - "query": "how not to be", - "cutoff_frequency": 0.001, - 
"minimum_should_match": { - "low_freq" : 2, - "high_freq" : 3 - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "should": [ - { "term": { "body": "how"}}, - { "term": { "body": "not"}}, - { "term": { "body": "to"}}, - { "term": { "body": "be"}} - ], - "minimum_should_match": "3<50%" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "span_within" : { - "little" : { - "span_term" : { "field1" : "foo" } - }, - "big" : { - "span_near" : { - "clauses" : [ - { "span_term" : { "field1" : "bar" } }, - { "span_term" : { "field1" : "baz" } } - ], - "slop" : 5, - "in_order" : true - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "span_near" : { - "clauses" : [ - { "span_term" : { "field" : "value1" } }, - { "span_term" : { "field" : "value2" } }, - { "span_term" : { "field" : "value3" } } - ], - "slop" : 12, - "in_order" : false - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_distance_range" : { - "from" : "200km", - "to" : "400km", - "pin.location" : { - "lat" : 40, - "lon" : -70 - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "simple_query_string" : { - "query": "\"fried eggs\" +(eggplant | potato) -frittata", - "analyzer": "snowball", - "fields": ["body^5","_all"], - "default_operator": "and" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "simple_query_string" : { - "fields" : ["content", "name.*^5"], - "query" : "foo bar baz" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "simple_query_string" : { - "query" : "foo | bar + baz*", - "flags" : "OR|AND|PREFIX" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "span_multi":{ - "match":{ - "prefix" : { "user" : { "value" : "ki" } } - 
} - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "span_multi":{ - "match":{ - "prefix" : { "user" : { "value" : "ki", "boost" : 1.08 } } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "constant_score" : { - "filter" : { - "term" : { "user" : "kimchy"} - }, - "boost" : 1.2 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "wildcard" : { "user" : "ki*y" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "wildcard" : { "user" : { "value" : "ki*y", "boost" : 2.0 } } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "wildcard" : { "user" : { "wildcard" : "ki*y", "boost" : 2.0 } } - } -} -handle snippet -null -emit snippet -body part: -{ - "mappings": { - "type1" : { - "properties" : { - "obj1" : { - "type" : "nested" - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "nested" : { - "path" : "obj1", - "score_mode" : "avg", - "query" : { - "bool" : { - "must" : [ - { "match" : {"obj1.name" : "blue"} }, - { "range" : {"obj1.count" : {"gt" : 5}} } - ] - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "term" : { "user" : "Kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "should": [ - { - "term": { - "status": { - "value": "urgent", - "boost": 2.0 - } - } - }, - { - "term": { - "status": "normal" - } - } - ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "full_text": { - "type": "text" - }, - "exact_value": { - "type": "keyword" - } - } - } - } -} -emit snippet -body part: -{ - "full_text": "Quick Foxes!", - "exact_value": "Quick Foxes!" 
-} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "term": { - "exact_value": "Quick Foxes!" - } - } -} -emit snippet -body part: -{ - "query": { - "term": { - "full_text": "Quick Foxes!" - } - } -} -emit snippet -body part: -{ - "query": { - "term": { - "full_text": "foxes" - } - } -} -emit snippet -body part: -{ - "query": { - "match": { - "full_text": "Quick Foxes!" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match" : { - "message" : "this is a test" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match" : { - "message" : { - "query" : "this is a test", - "operator" : "and" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match" : { - "message" : { - "query" : "to be or not to be", - "operator" : "and", - "zero_terms_query": "all" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match" : { - "message" : { - "query" : "to be or not to be", - "cutoff_frequency" : 0.001 - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "must": [ - { "match": { "title": "Search" }}, - { "match": { "content": "Elasticsearch" }} - ], - "filter": [ - { "term": { "status": "published" }}, - { "range": { "publish_date": { "gte": "2015-01-01" }}} - ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "ids" : { - "type" : "my_type", - "values" : ["1", "4", "100"] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "dis_max" : { - "tie_breaker" : 0.7, - "boost" : 1.2, - "queries" : [ - { - "term" : { "age" : 34 } - }, - { - "term" : { "age" : 35 } - } - ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match_phrase" : { - "message" : "this is a test" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match_phrase" : { 
- "message" : { - "query" : "this is a test", - "analyzer" : "my_analyzer" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "script" : { - "script" : "doc['num1'].value > 1" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "script" : { - "script" : { - "inline" : "doc['num1'].value > param1", - "params" : { - "param1" : 5 - } - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ "query": { - "prefix" : { "user" : "ki" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ "query": { - "prefix" : { "user" : { "value" : "ki", "boost" : 2.0 } } - } -} -handle snippet -test snippet -emit snippet -body part: -{ "query": { - "prefix" : { "user" : { "prefix" : "ki", "boost" : 2.0 } } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "term" : { "user" : "kimchy" } - }, - "filter": { - "term" : { "tag" : "tech" } - }, - "must_not" : { - "range" : { - "age" : { "from" : 10, "to" : 20 } - } - }, - "should" : [ - { "term" : { "tag" : "wow" } }, - { "term" : { "tag" : "elasticsearch" } } - ], - "minimum_should_match" : 1, - "boost" : 1.0 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "filter": { - "term": { - "status": "active" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "must": { - "match_all": {} - }, - "filter": { - "term": { - "status": "active" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "constant_score": { - "filter": { - "term": { - "status": "active" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "span_containing" : { - "little" : { - "span_term" : { "field1" : "foo" } - }, - "big" : { - "span_near" : { - "clauses" : [ - { "span_term" : { 
"field1" : "bar" } }, - { "span_term" : { "field1" : "baz" } } - ], - "slop" : 5, - "in_order" : true - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "boosting" : { - "positive" : { - "term" : { - "field1" : "value1" - } - }, - "negative" : { - "term" : { - "field2" : "value2" - } - }, - "negative_boost" : 0.2 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "has_child" : { - "type" : "blog_tag", - "query" : { - "term" : { - "tag" : "something" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "has_child" : { - "type" : "blog_tag", - "score_mode" : "min", - "query" : { - "term" : { - "tag" : "something" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "has_child" : { - "type" : "blog_tag", - "score_mode" : "min", - "min_children": 2, - "max_children": 10, - "query" : { - "term" : { - "tag" : "something" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "constant_score" : { - "filter" : { - "terms" : { "user" : ["kimchy", "elasticsearch"]} - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "followers" : ["1", "3"] -} -emit snippet -body part: -{ - "user" : "1" -} -emit snippet -body part: -{ - "query" : { - "terms" : { - "user" : { - "index" : "users", - "type" : "user", - "id" : "2", - "path" : "followers" - } - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "exists" : { "field" : "user" } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "must_not": { - "exists": { - "field": "user" - } - } - } - } -} -handle snippet -null -emit snippet -body part: -{ - "mappings" : { - "location": { - "properties": { - "pin": { - "type": "geo_point", - "geohash": true, - 
"geohash_prefix": true, - "geohash_precision": 10 - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geohash_cell": { - "pin": { - "lat": 13.4080, - "lon": 52.5186 - }, - "precision": 3, - "neighbors": true - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "regexp":{ - "name.first": "s.*y" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "regexp":{ - "name.first":{ - "value":"s.*y", - "boost":1.2 - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "regexp":{ - "name.first": { - "value": "s.*y", - "flags" : "INTERSECTION|COMPLEMENT|EMPTY" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "regexp":{ - "name.first": { - "value": "s.*y", - "flags" : "INTERSECTION|COMPLEMENT|EMPTY", - "max_determinized_states": 20000 - } - } - } -} -handle snippet -null -emit snippet -body part: -{ - "mappings": { - "blog_post": { - "properties": { - "name": { - "type": "keyword" - } - } - }, - "blog_tag": { - "_parent": { - "type": "blog_post" - }, - "_routing": { - "required": true - } - } - } -} -emit snippet -query part: wait_for_status=yellow -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "parent_id" : { - "type" : "blog_tag", - "id" : "1" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "has_parent": { - "type": "blog_post", - "query": { - "term": { - "_id": "1" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "has_parent" : { - "parent_type" : "blog", - "query" : { - "term" : { - "tag" : "something" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "has_parent" : { - "parent_type" : "blog", - "score" : true, - "query" : { - "term" : { - "tag" : "something" - } - } - } - } -} 
-handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "template": { - "inline": { "match": { "text": "{{query_string}}" }}, - "params" : { - "query_string" : "all about search" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match": { - "text": "all about search" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "template": { - "inline": "{ \"match\": { \"text\": \"{{query_string}}\" }}", - "params" : { - "query_string" : "all about search" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "template": { - "file": "my_template", - "params" : { - "query_string" : "all about search" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "template": { "match": { "text": "{{query_string}}" }} -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "template": { - "id": "my_template", - "params" : { - "query_string" : "all about search" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "span_first" : { - "match" : { - "span_term" : { "user" : "kimchy" } - }, - "end" : 3 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_polygon" : { - "person.location" : { - "points" : [ - {"lat" : 40, "lon" : -70}, - {"lat" : 30, "lon" : -80}, - {"lat" : 20, "lon" : -90} - ] - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_polygon" : { - "person.location" : { - "points" : [ - [-70, 40], - [-80, 30], - [-90, 20] - ] - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_polygon" : { - "person.location" : { - "points" : [ - "40, -70", - "30, -80", 
- "20, -90" - ] - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_polygon" : { - "person.location" : { - "points" : [ - "drn5x1g8cu2y", - "30, -80", - "20, -90" - ] - } - } - } - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query":{ - "bool": { - "must": { - "match_all": {} - }, - "filter": { - "geo_shape": { - "location": { - "shape": { - "type": "envelope", - "coordinates" : [[13.0, 53.0], [14.0, 52.0]] - }, - "relation": "within" - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "must": { - "match_all": {} - }, - "filter": { - "geo_shape": { - "location": { - "indexed_shape": { - "id": "DEU", - "type": "countries", - "index": "shapes", - "path": "location" - } - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "function_score": { - "query": {}, - "boost": "5", - "random_score": {}, - "boost_mode":"multiply" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "function_score": { - "query": {}, - "boost": "5", - "functions": [ - { - "filter": {}, - "random_score": {}, - "weight": 23 - }, - { - "filter": {}, - "weight": 42 - } - ], - "max_boost": 42, - "score_mode": "max", - "boost_mode": "multiply", - "min_score" : 42 - } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "function_score": { - "functions": [ - { - "gauss": { - "price": { - "origin": "0", - "scale": "20" - } - } - }, - { - "gauss": { - "location": { - "origin": "11, 12", - "scale": "2km" - } - } - } - ], - "query": { - "match": { - "properties": "balcony" - } - }, - "score_mode": "multiply" - } - } -} -handle snippet 
-test snippet -emit snippet -body part: -{ - "query": { - "span_or" : { - "clauses" : [ - { "span_term" : { "field" : "value1" } }, - { "span_term" : { "field" : "value2" } }, - { "span_term" : { "field" : "value3" } } - ] - } - } -} -handle snippet -null -emit snippet -body part: -{ - "mappings": { - "location": { - "properties": { - "pin": { - "properties": { - "location": { - "type": "geo_point" - } - } - } - } - } - } -} -emit snippet -body part: -{ - "pin" : { - "location" : { - "lat" : 40.12, - "lon" : -71.34 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_bounding_box" : { - "pin.location" : { - "top_left" : { - "lat" : 40.73, - "lon" : -74.1 - }, - "bottom_right" : { - "lat" : 40.01, - "lon" : -71.12 - } - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_bounding_box" : { - "pin.location" : { - "top_left" : { - "lat" : 40.73, - "lon" : -74.1 - }, - "bottom_right" : { - "lat" : 40.01, - "lon" : -71.12 - } - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_bounding_box" : { - "pin.location" : { - "top_left" : [-74.1, 40.73], - "bottom_right" : [-71.12, 40.01] - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_bounding_box" : { - "pin.location" : { - "top_left" : "40.73, -74.1", - "bottom_right" : "40.01, -71.12" - } - } - } - } -} -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_bounding_box" : { - "pin.location" : { - "top_left" : "dr5r9ydj2y73", - "bottom_right" : "drj7teegpus6" - } - } - } - } - } -} -handle 
snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_bounding_box" : { - "pin.location" : { - "top" : 40.73, - "left" : -74.1, - "bottom" : 40.01, - "right" : -71.12 - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool" : { - "must" : { - "match_all" : {} - }, - "filter" : { - "geo_bounding_box" : { - "pin.location" : { - "top_left" : { - "lat" : 40.73, - "lon" : -74.1 - }, - "bottom_right" : { - "lat" : 40.10, - "lon" : -71.12 - } - }, - "type" : "indexed" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match_phrase_prefix" : { - "message" : "quick brown f" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match_phrase_prefix" : { - "message" : { - "query" : "quick brown f", - "max_expansions" : 10 - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "doctype": { - "properties": { - "message": { - "type": "string" - } - } - }, - "queries": { - "properties": { - "query": { - "type": "percolator" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match" : { - "message" : "bonsai tree" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "percolate" : { - "field" : "query", - "document_type" : "doctype", - "document" : { - "message" : "A new bonsai tree in the office" - } - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "message" : "A new bonsai tree in the office" -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "percolate" : { - "field": "query", - "document_type" : "doctype", - "index" : "my-index", - "type" : "message", - "id" : "1", - "version" : 1 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match" : { - 
"message" : "brown fox" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "match" : { - "message" : "lazy dog" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query" : { - "percolate" : { - "field": "query", - "document_type" : "doctype", - "document" : { - "message" : "The quick brown fox jumps over the lazy dog" - } - } - }, - "highlight": { - "fields": { - "message": {} - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "term" : { - "query.unknown_query" : "" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "span_not" : { - "include" : { - "span_term" : { "field1" : "hoya" } - }, - "exclude" : { - "span_near" : { - "clauses" : [ - { "span_term" : { "field1" : "la" } }, - { "span_term" : { "field1" : "hoya" } } - ], - "slop" : 0, - "in_order" : true - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "type" : { - "value" : "my_type" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "span_term" : { "user" : "kimchy" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "span_term" : { "user" : { "value" : "kimchy", "boost" : 2.0 } } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "span_term" : { "user" : { "term" : "kimchy", "boost" : 2.0 } } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "fuzzy" : { "user" : "ki" } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "fuzzy" : { - "user" : { - "value" : "ki", - "boost" : 1.0, - "fuzziness" : 2, - "prefix_length" : 0, - "max_expansions": 100 - } - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "description" : "describe pipeline", - "processors" : [ - { - "set" : { - "field": "foo", - "value": "bar" - } - } - // other processors - ] -} 
-handle snippet -test snippet -emit snippet -handle snippet -handle snippet -test snippet -emit snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "pipeline" : - { - "description": "_description", - "processors": [ - { - "set" : { - "field" : "field2", - "value" : "_value" - } - } - ] - }, - "docs": [ - { - "_index": "index", - "_type": "type", - "_id": "id", - "_source": { - "foo": "bar" - } - }, - { - "_index": "index", - "_type": "type", - "_id": "id", - "_source": { - "foo": "rab" - } - } - ] -} -handle snippet -handle snippet -test snippet -emit snippet -query part: verbose -body part: -{ - "pipeline" : - { - "description": "_description", - "processors": [ - { - "set" : { - "field" : "field2", - "value" : "_value2" - } - }, - { - "set" : { - "field" : "field3", - "value" : "_value3" - } - } - ] - }, - "docs": [ - { - "_index": "index", - "_type": "type", - "_id": "id", - "_source": { - "foo": "bar" - } - }, - { - "_index": "index", - "_type": "type", - "_id": "id", - "_source": { - "foo": "rab" - } - } - ] -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet 
-handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -emit snippet -query part: nodes=nodeId1,nodeId2 -emit snippet -query part: nodes=nodeId1,nodeId2&actions=cluster:* -handle snippet -handle snippet -test snippet -emit snippet -emit snippet -query part: parent_task_id=parentTaskId:1 -handle snippet -test snippet -emit snippet -query part: wait_for_completion=true&timeout=10s -handle snippet -test snippet -emit snippet -handle snippet -test snippet -emit snippet -handle snippet -test snippet -emit snippet -query part: node_id=nodeId1,nodeId2&actions=*reindex -handle snippet -test snippet -emit snippet -query part: group_by=parents -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "transient" : { - "cluster.routing.allocation.exclude._ip" : "10.0.0.1" - } -} -handle 
snippet -test snippet -emit snippet -body part: -{ - "transient": { - "cluster.routing.allocation.include._ip": "192.168.2.*" - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "transient": { - "cluster.routing.allocation.disk.watermark.low": "80%", - "cluster.routing.allocation.disk.watermark.high": "50gb", - "cluster.info.update.interval": "1m" - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -null -emit snippet -query part: refresh -body part: -{"index":{"_id":1}} -{"first":"johnny","last":"gaudreau","goals":[9,27,1],"assists":[17,46,0],"gp":[26,82,1]} -{"index":{"_id":2}} -{"first":"sean","last":"monohan","goals":[7,54,26],"assists":[11,26,13],"gp":[26,82,82]} -{"index":{"_id":3}} -{"first":"jiri","last":"hudler","goals":[5,34,36],"assists":[11,62,42],"gp":[24,80,79]} -{"index":{"_id":4}} -{"first":"micheal","last":"frolik","goals":[4,6,15],"assists":[8,23,15],"gp":[26,82,82]} -{"index":{"_id":5}} -{"first":"sam","last":"bennett","goals":[5,0,0],"assists":[8,1,0],"gp":[26,1,0]} -{"index":{"_id":6}} -{"first":"dennis","last":"wideman","goals":[0,26,15],"assists":[11,30,24],"gp":[26,81,82]} -{"index":{"_id":7}} -{"first":"david","last":"jones","goals":[7,19,5],"assists":[3,17,4],"gp":[26,45,34]} -{"index":{"_id":8}} -{"first":"tj","last":"brodie","goals":[2,14,7],"assists":[8,42,30],"gp":[26,82,82]} -{"index":{"_id":39}} -{"first":"mark","last":"giordano","goals":[6,30,15],"assists":[3,30,24],"gp":[26,60,63]} -{"index":{"_id":10}} -{"first":"mikael","last":"backlund","goals":[3,15,13],"assists":[6,24,18],"gp":[26,82,82]} -{"index":{"_id":11}} 
-{"first":"joe","last":"colborne","goals":[3,18,13],"assists":[6,20,24],"gp":[26,67,82]} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "function_score": { - "script_score": { - "script": { - "lang": "painless", - "inline": "int total = 0; for (int i = 0; i < doc['goals'].length; ++i) { total += doc['goals'][i]; } return total;" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match_all": {} - }, - "script_fields": { - "total_goals": { - "script": { - "lang": "painless", - "inline": "int total = 0; for (int i = 0; i < doc['goals'].length; ++i) { total += doc['goals'][i]; } return total;" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match_all": {} - }, - "sort": { - "_script": { - "type": "string", - "order": "asc", - "script": { - "lang": "painless", - "inline": "doc['first'].value + ' ' + doc['last'].value" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "fields": [ - "_id", - "_source" - ], - "query": { - "term": { - "_id": 1 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "script": { - "lang": "painless", - "inline": "ctx._source.last = params.last", - "params": { - "last": "hockey" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "script": { - "lang": "painless", - "inline": "ctx._source.last = params.last; ctx._source.nick = params.nick", - "params": { - "last": "gaudreau", - "nick": "hockey" - } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle 
snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "text": "quick brown fox", - "popularity": 1 -} -emit snippet -body part: -{ - "text": "quick fox", - "popularity": 5 -} -emit snippet -body part: -{ - "query": { - "function_score": { - "query": { - "match": { - "text": "quick brown fox" - } - }, - "script_score": { - "script": { - "lang": "expression", - "inline": "_score * doc['popularity']" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "cost_price": 100 -} -emit snippet -body part: -{ - "script_fields": { - "sales_price": { - "script": { - "lang": "expression", - "inline": "doc['cost_price'] * markup", - "params": { - "markup": 0.2 - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "title": { - "type": "text" - }, - "first_name": { - "type": "text", - "store": true - }, - "last_name": { - "type": "text", - "store": true - } - } - } - } -} -emit snippet -body part: -{ - "title": "Mr", - "first_name": "Barry", - "last_name": "White" -} -emit snippet -body part: -{ - "script_fields": { - "source": { - "script": { - "lang": "groovy", - "inline": "_source.title + ' ' + _source.first_name + ' ' + _source.last_name" - } - }, - "stored_fields": { - "script": { - "lang": "groovy", - "inline": "_fields['first_name'].value + ' ' + _fields['last_name'].value" - } - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "my_field": 5 -} -emit snippet -body part: -{ - "script_fields": { - "my_doubled_field": { - "script": { - "lang": "expression", - "inline": "doc['my_field'] * multiplier", - "params": { - "multiplier": 2 - } - } - } - } -} -handle snippet -handle snippet -handle snippet -handle 
snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "script": "log(_score * 2) + my_modifier" -} -handle snippet -test snippet -emit snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "script": { - "script": { - "lang": "groovy", - "id": "calculate-score", - "params": { - "my_modifier": 2 - } - } - } - } -} -handle snippet -test snippet -emit snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -catch part: /cannot set discovery.zen.minimum_master_nodes to more than the current master nodes/ -body part: -{ - "transient": { - "discovery.zen.minimum_master_nodes": 2 - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} - {"index":{"_id": "5"}} - {"user": "test", "message": "some message with the number 5", "date": "2009-11-15T14:12:12", "likes": 5} - {"index":{"_id": "6"}} - {"user": "test", "message": "some message with the number 6", "date": "2009-11-15T14:12:12", "likes": 6} - 
{"index":{"_id": "7"}} - {"user": "test", "message": "some message with the number 7", "date": "2009-11-15T14:12:12", "likes": 7} - {"index":{"_id": "8"}} - {"user": "test", "message": "some message with the number 8", "date": "2009-11-15T14:12:12", "likes": 8} - {"index":{"_id": "9"}} - {"user": "test", "message": "some message with the number 9", "date": "2009-11-15T14:12:12", "likes": 9} - {"index":{"_id": "10"}} - {"user": "test", "message": "some message with the number 10", "date": "2009-11-15T14:12:12", "likes": 10} - {"index":{"_id": "11"}} - {"user": "test", "message": "some message with the number 11", "date": "2009-11-15T14:12:12", "likes": 11} - {"index":{"_id": "12"}} - {"user": "test", "message": "some message with the number 12", "date": "2009-11-15T14:12:12", "likes": 12} - {"index":{"_id": "13"}} - {"user": "test", "message": "some message with the number 13", "date": "2009-11-15T14:12:12", "likes": 13} - {"index":{"_id": "14"}} - {"user": "test", "message": "some message with the number 14", "date": "2009-11-15T14:12:12", "likes": 14} - {"index":{"_id": "15"}} - {"user": "test", "message": "some message with the number 15", "date": "2009-11-15T14:12:12", "likes": 15} - {"index":{"_id": "16"}} - {"user": "test", "message": "some message with the number 16", "date": "2009-11-15T14:12:12", "likes": 16} - {"index":{"_id": "17"}} - {"user": "test", "message": "some message with the number 17", "date": "2009-11-15T14:12:12", "likes": 17} - {"index":{"_id": "18"}} - {"user": "test", "message": "some message with the number 18", "date": "2009-11-15T14:12:12", "likes": 18} - {"index":{"_id": "19"}} - {"user": "test", "message": "some message with the number 19", "date": "2009-11-15T14:12:12", "likes": 19} - {"index":{"_id": "20"}} - {"user": "test", "message": "some message with the number 20", "date": "2009-11-15T14:12:12", "likes": 20} - {"index":{"_id": "21"}} - {"user": "test", "message": "some message with the number 21", "date": 
"2009-11-15T14:12:12", "likes": 21} - {"index":{"_id": "22"}} - {"user": "test", "message": "some message with the number 22", "date": "2009-11-15T14:12:12", "likes": 22} - {"index":{"_id": "23"}} - {"user": "test", "message": "some message with the number 23", "date": "2009-11-15T14:12:12", "likes": 23} - {"index":{"_id": "24"}} - {"user": "test", "message": "some message with the number 24", "date": "2009-11-15T14:12:12", "likes": 24} - {"index":{"_id": "25"}} - {"user": "test", "message": "some message with the number 25", "date": "2009-11-15T14:12:12", "likes": 25} - {"index":{"_id": "26"}} - {"user": "test", "message": "some message with the number 26", "date": "2009-11-15T14:12:12", "likes": 26} - {"index":{"_id": "27"}} - {"user": "test", "message": "some message with the number 27", "date": "2009-11-15T14:12:12", "likes": 27} - {"index":{"_id": "28"}} - {"user": "test", "message": "some message with the number 28", "date": "2009-11-15T14:12:12", "likes": 28} - {"index":{"_id": "29"}} - {"user": "test", "message": "some message with the number 29", "date": "2009-11-15T14:12:12", "likes": 29} - {"index":{"_id": "30"}} - {"user": "test", "message": "some message with the number 30", "date": "2009-11-15T14:12:12", "likes": 30} - {"index":{"_id": "31"}} - {"user": "test", "message": "some message with the number 31", "date": "2009-11-15T14:12:12", "likes": 31} - {"index":{"_id": "32"}} - {"user": "test", "message": "some message with the number 32", "date": "2009-11-15T14:12:12", "likes": 32} - {"index":{"_id": "33"}} - {"user": "test", "message": "some message with the number 33", "date": "2009-11-15T14:12:12", "likes": 33} - {"index":{"_id": "34"}} - {"user": "test", "message": "some message with the number 34", "date": "2009-11-15T14:12:12", "likes": 34} - {"index":{"_id": "35"}} - {"user": "test", "message": "some message with the number 35", "date": "2009-11-15T14:12:12", "likes": 35} - {"index":{"_id": "36"}} - {"user": "test", "message": "some message 
with the number 36", "date": "2009-11-15T14:12:12", "likes": 36} - {"index":{"_id": "37"}} - {"user": "test", "message": "some message with the number 37", "date": "2009-11-15T14:12:12", "likes": 37} - {"index":{"_id": "38"}} - {"user": "test", "message": "some message with the number 38", "date": "2009-11-15T14:12:12", "likes": 38} - {"index":{"_id": "39"}} - {"user": "test", "message": "some message with the number 39", "date": "2009-11-15T14:12:12", "likes": 39} - {"index":{"_id": "40"}} - {"user": "test", "message": "some message with the number 40", "date": "2009-11-15T14:12:12", "likes": 40} - {"index":{"_id": "41"}} - {"user": "test", "message": "some message with the number 41", "date": "2009-11-15T14:12:12", "likes": 41} - {"index":{"_id": "42"}} - {"user": "test", "message": "some message with the number 42", "date": "2009-11-15T14:12:12", "likes": 42} - {"index":{"_id": "43"}} - {"user": "test", "message": "some message with the number 43", "date": "2009-11-15T14:12:12", "likes": 43} - {"index":{"_id": "44"}} - {"user": "test", "message": "some message with the number 44", "date": "2009-11-15T14:12:12", "likes": 44} - {"index":{"_id": "45"}} - {"user": "test", "message": "some message with the number 45", "date": "2009-11-15T14:12:12", "likes": 45} - {"index":{"_id": "46"}} - {"user": "test", "message": "some message with the number 46", "date": "2009-11-15T14:12:12", "likes": 46} - {"index":{"_id": "47"}} - {"user": "test", "message": "some message with the number 47", "date": "2009-11-15T14:12:12", "likes": 47} - {"index":{"_id": "48"}} - {"user": "test", "message": "some message with the number 48", "date": "2009-11-15T14:12:12", "likes": 48} - {"index":{"_id": "49"}} - {"user": "test", "message": "some message with the number 49", "date": "2009-11-15T14:12:12", "likes": 49} - {"index":{"_id": "50"}} - {"user": "test", "message": "some message with the number 50", "date": "2009-11-15T14:12:12", "likes": 50} - {"index":{"_id": "51"}} - {"user": "test", 
"message": "some message with the number 51", "date": "2009-11-15T14:12:12", "likes": 51} - {"index":{"_id": "52"}} - {"user": "test", "message": "some message with the number 52", "date": "2009-11-15T14:12:12", "likes": 52} - {"index":{"_id": "53"}} - {"user": "test", "message": "some message with the number 53", "date": "2009-11-15T14:12:12", "likes": 53} - {"index":{"_id": "54"}} - {"user": "test", "message": "some message with the number 54", "date": "2009-11-15T14:12:12", "likes": 54} - {"index":{"_id": "55"}} - {"user": "test", "message": "some message with the number 55", "date": "2009-11-15T14:12:12", "likes": 55} - {"index":{"_id": "56"}} - {"user": "test", "message": "some message with the number 56", "date": "2009-11-15T14:12:12", "likes": 56} - {"index":{"_id": "57"}} - {"user": "test", "message": "some message with the number 57", "date": "2009-11-15T14:12:12", "likes": 57} - {"index":{"_id": "58"}} - {"user": "test", "message": "some message with the number 58", "date": "2009-11-15T14:12:12", "likes": 58} - {"index":{"_id": "59"}} - {"user": "test", "message": "some message with the number 59", "date": "2009-11-15T14:12:12", "likes": 59} - {"index":{"_id": "60"}} - {"user": "test", "message": "some message with the number 60", "date": "2009-11-15T14:12:12", "likes": 60} - {"index":{"_id": "61"}} - {"user": "test", "message": "some message with the number 61", "date": "2009-11-15T14:12:12", "likes": 61} - {"index":{"_id": "62"}} - {"user": "test", "message": "some message with the number 62", "date": "2009-11-15T14:12:12", "likes": 62} - {"index":{"_id": "63"}} - {"user": "test", "message": "some message with the number 63", "date": "2009-11-15T14:12:12", "likes": 63} - {"index":{"_id": "64"}} - {"user": "test", "message": "some message with the number 64", "date": "2009-11-15T14:12:12", "likes": 64} - {"index":{"_id": "65"}} - {"user": "test", "message": "some message with the number 65", "date": "2009-11-15T14:12:12", "likes": 65} - {"index":{"_id": 
"66"}} - {"user": "test", "message": "some message with the number 66", "date": "2009-11-15T14:12:12", "likes": 66} - {"index":{"_id": "67"}} - {"user": "test", "message": "some message with the number 67", "date": "2009-11-15T14:12:12", "likes": 67} - {"index":{"_id": "68"}} - {"user": "test", "message": "some message with the number 68", "date": "2009-11-15T14:12:12", "likes": 68} - {"index":{"_id": "69"}} - {"user": "test", "message": "some message with the number 69", "date": "2009-11-15T14:12:12", "likes": 69} - {"index":{"_id": "70"}} - {"user": "test", "message": "some message with the number 70", "date": "2009-11-15T14:12:12", "likes": 70} - {"index":{"_id": "71"}} - {"user": "test", "message": "some message with the number 71", "date": "2009-11-15T14:12:12", "likes": 71} - {"index":{"_id": "72"}} - {"user": "test", "message": "some message with the number 72", "date": "2009-11-15T14:12:12", "likes": 72} - {"index":{"_id": "73"}} - {"user": "test", "message": "some message with the number 73", "date": "2009-11-15T14:12:12", "likes": 73} - {"index":{"_id": "74"}} - {"user": "test", "message": "some message with the number 74", "date": "2009-11-15T14:12:12", "likes": 74} - {"index":{"_id": "75"}} - {"user": "test", "message": "some message with the number 75", "date": "2009-11-15T14:12:12", "likes": 75} - {"index":{"_id": "76"}} - {"user": "test", "message": "some message with the number 76", "date": "2009-11-15T14:12:12", "likes": 76} - {"index":{"_id": "77"}} - {"user": "test", "message": "some message with the number 77", "date": "2009-11-15T14:12:12", "likes": 77} - {"index":{"_id": "78"}} - {"user": "test", "message": "some message with the number 78", "date": "2009-11-15T14:12:12", "likes": 78} - {"index":{"_id": "79"}} - {"user": "test", "message": "some message with the number 79", "date": "2009-11-15T14:12:12", "likes": 79} - {"index":{"_id": "80"}} - {"user": "test", "message": "some message with the number 80", "date": "2009-11-15T14:12:12", 
"likes": 80} - {"index":{"_id": "81"}} - {"user": "test", "message": "some message with the number 81", "date": "2009-11-15T14:12:12", "likes": 81} - {"index":{"_id": "82"}} - {"user": "test", "message": "some message with the number 82", "date": "2009-11-15T14:12:12", "likes": 82} - {"index":{"_id": "83"}} - {"user": "test", "message": "some message with the number 83", "date": "2009-11-15T14:12:12", "likes": 83} - {"index":{"_id": "84"}} - {"user": "test", "message": "some message with the number 84", "date": "2009-11-15T14:12:12", "likes": 84} - {"index":{"_id": "85"}} - {"user": "test", "message": "some message with the number 85", "date": "2009-11-15T14:12:12", "likes": 85} - {"index":{"_id": "86"}} - {"user": "test", "message": "some message with the number 86", "date": "2009-11-15T14:12:12", "likes": 86} - {"index":{"_id": "87"}} - {"user": "test", "message": "some message with the number 87", "date": "2009-11-15T14:12:12", "likes": 87} - {"index":{"_id": "88"}} - {"user": "test", "message": "some message with the number 88", "date": "2009-11-15T14:12:12", "likes": 88} - {"index":{"_id": "89"}} - {"user": "test", "message": "some message with the number 89", "date": "2009-11-15T14:12:12", "likes": 89} - {"index":{"_id": "90"}} - {"user": "test", "message": "some message with the number 90", "date": "2009-11-15T14:12:12", "likes": 90} - {"index":{"_id": "91"}} - {"user": "test", "message": "some message with the number 91", "date": "2009-11-15T14:12:12", "likes": 91} - {"index":{"_id": "92"}} - {"user": "test", "message": "some message with the number 92", "date": "2009-11-15T14:12:12", "likes": 92} - {"index":{"_id": "93"}} - {"user": "test", "message": "some message with the number 93", "date": "2009-11-15T14:12:12", "likes": 93} - {"index":{"_id": "94"}} - {"user": "test", "message": "some message with the number 94", "date": "2009-11-15T14:12:12", "likes": 94} - {"index":{"_id": "95"}} - {"user": "test", "message": "some message with the number 95", 
"date": "2009-11-15T14:12:12", "likes": 95} - {"index":{"_id": "96"}} - {"user": "test", "message": "some message with the number 96", "date": "2009-11-15T14:12:12", "likes": 96} - {"index":{"_id": "97"}} - {"user": "test", "message": "some message with the number 97", "date": "2009-11-15T14:12:12", "likes": 97} - {"index":{"_id": "98"}} - {"user": "test", "message": "some message with the number 98", "date": "2009-11-15T14:12:12", "likes": 98} - {"index":{"_id": "99"}} - {"user": "test", "message": "some message with the number 99", "date": "2009-11-15T14:12:12", "likes": 99} - {"index":{"_id": "100"}} - {"user": "test", "message": "some message with the number 100", "date": "2009-11-15T14:12:12", "likes": 100} - {"index":{"_id": "101"}} - {"user": "test", "message": "some message with the number 101", "date": "2009-11-15T14:12:12", "likes": 101} - {"index":{"_id": "102"}} - {"user": "test", "message": "some message with the number 102", "date": "2009-11-15T14:12:12", "likes": 102} - {"index":{"_id": "103"}} - {"user": "test", "message": "some message with the number 103", "date": "2009-11-15T14:12:12", "likes": 103} - {"index":{"_id": "104"}} - {"user": "test", "message": "some message with the number 104", "date": "2009-11-15T14:12:12", "likes": 104} - {"index":{"_id": "105"}} - {"user": "test", "message": "some message with the number 105", "date": "2009-11-15T14:12:12", "likes": 105} - {"index":{"_id": "106"}} - {"user": "test", "message": "some message with the number 106", "date": "2009-11-15T14:12:12", "likes": 106} - {"index":{"_id": "107"}} - {"user": "test", "message": "some message with the number 107", "date": "2009-11-15T14:12:12", "likes": 107} - {"index":{"_id": "108"}} - {"user": "test", "message": "some message with the number 108", "date": "2009-11-15T14:12:12", "likes": 108} - {"index":{"_id": "109"}} - {"user": "test", "message": "some message with the number 109", "date": "2009-11-15T14:12:12", "likes": 109} - {"index":{"_id": "110"}} - 
{"user": "test", "message": "some message with the number 110", "date": "2009-11-15T14:12:12", "likes": 110} - {"index":{"_id": "111"}} - {"user": "test", "message": "some message with the number 111", "date": "2009-11-15T14:12:12", "likes": 111} - {"index":{"_id": "112"}} - {"user": "test", "message": "some message with the number 112", "date": "2009-11-15T14:12:12", "likes": 112} - {"index":{"_id": "113"}} - {"user": "test", "message": "some message with the number 113", "date": "2009-11-15T14:12:12", "likes": 113} - {"index":{"_id": "114"}} - {"user": "test", "message": "some message with the number 114", "date": "2009-11-15T14:12:12", "likes": 114} - {"index":{"_id": "115"}} - {"user": "test", "message": "some message with the number 115", "date": "2009-11-15T14:12:12", "likes": 115} - {"index":{"_id": "116"}} - {"user": "test", "message": "some message with the number 116", "date": "2009-11-15T14:12:12", "likes": 116} - {"index":{"_id": "117"}} - {"user": "test", "message": "some message with the number 117", "date": "2009-11-15T14:12:12", "likes": 117} - {"index":{"_id": "118"}} - {"user": "test", "message": "some message with the number 118", "date": "2009-11-15T14:12:12", "likes": 118} - {"index":{"_id": "119"}} - {"user": "test", "message": "some message with the number 119", "date": "2009-11-15T14:12:12", "likes": 119} -emit snippet -body part: -{ - "query": { - "match": { - "message": "some message" - } - } -} -handle snippet -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": 
"test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -query part: conflicts=proceed -body part: -{ - "query": { - "match_all": {} - } -} -handle snippet -test snippet -emit snippet -emit snippet -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "query": { - "match_all": {} - } -} -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -query part: routing=1 -body part: -{ - "query": { - "range" : { - "age" : { - "gte" : 10 - } - } - } -} -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", 
"likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -query part: scroll_size=5000 -body part: -{ - "query": { - "term": { - "user": "kimchy" - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -query part: detailed=true&action=*/delete/byquery -handle snippet -handle snippet -test snippet -emit snippet -handle snippet -test snippet -emit snippet -query part: requests_per_second=unlimited -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "user" : "kimchy", - "post_date" : "2009-11-15T14:12:12", - "message" : "trying out Elasticsearch" -} -handle snippet -handle snippet -test snippet -emit snippet -catch part: conflict -query part: version=2 -body part: -{ - "message" : "elasticsearch now has versioning support, double cool!" 
-} -handle snippet -test snippet -emit snippet -query part: op_type=create -body part: -{ - "user" : "kimchy", - "post_date" : "2009-11-15T14:12:12", - "message" : "trying out Elasticsearch" -} -handle snippet -test snippet -emit snippet -body part: -{ - "user" : "kimchy", - "post_date" : "2009-11-15T14:12:12", - "message" : "trying out Elasticsearch" -} -handle snippet -test snippet -emit snippet -body part: -{ - "user" : "kimchy", - "post_date" : "2009-11-15T14:12:12", - "message" : "trying out Elasticsearch" -} -handle snippet -handle snippet -test snippet -emit snippet -query part: routing=kimchy -body part: -{ - "user" : "kimchy", - "post_date" : "2009-11-15T14:12:12", - "message" : "trying out Elasticsearch" -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "tag_parent": {}, - "blog_tag": { - "_parent": { - "type": "tag_parent" - } - } - } -} -emit snippet -query part: parent=1111 -body part: -{ - "tag" : "something" -} -handle snippet -test snippet -emit snippet -query part: timestamp=2009-11-15T14:12:12 -body part: -{ - "user" : "kimchy", - "message" : "trying out Elasticsearch" -} -handle snippet -test snippet -emit snippet -query part: ttl=86400000ms -body part: -{ - "user": "kimchy", - "message": "Trying out elasticsearch, so far so good?" -} -handle snippet -test snippet -emit snippet -query part: ttl=1d -body part: -{ - "user": "kimchy", - "message": "Trying out elasticsearch, so far so good?" 
-} -handle snippet -test snippet -emit snippet -query part: timeout=5m -body part: -{ - "user" : "kimchy", - "post_date" : "2009-11-15T14:12:12", - "message" : "trying out Elasticsearch" -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} - {"index":{"_id": "5"}} - {"user": "test", "message": "some message with the number 5", "date": "2009-11-15T14:12:12", "likes": 5} - {"index":{"_id": "6"}} - {"user": "test", "message": "some message with the number 6", "date": "2009-11-15T14:12:12", "likes": 6} - {"index":{"_id": "7"}} - {"user": "test", "message": "some message with the number 7", "date": "2009-11-15T14:12:12", "likes": 7} - {"index":{"_id": "8"}} - {"user": "test", "message": "some message with the number 8", "date": "2009-11-15T14:12:12", "likes": 8} - {"index":{"_id": "9"}} - {"user": "test", "message": "some message with the number 9", "date": "2009-11-15T14:12:12", "likes": 9} - {"index":{"_id": "10"}} - {"user": "test", "message": "some message with the number 10", "date": "2009-11-15T14:12:12", "likes": 10} - {"index":{"_id": "11"}} - {"user": "test", "message": "some 
message with the number 11", "date": "2009-11-15T14:12:12", "likes": 11} - {"index":{"_id": "12"}} - {"user": "test", "message": "some message with the number 12", "date": "2009-11-15T14:12:12", "likes": 12} - {"index":{"_id": "13"}} - {"user": "test", "message": "some message with the number 13", "date": "2009-11-15T14:12:12", "likes": 13} - {"index":{"_id": "14"}} - {"user": "test", "message": "some message with the number 14", "date": "2009-11-15T14:12:12", "likes": 14} - {"index":{"_id": "15"}} - {"user": "test", "message": "some message with the number 15", "date": "2009-11-15T14:12:12", "likes": 15} - {"index":{"_id": "16"}} - {"user": "test", "message": "some message with the number 16", "date": "2009-11-15T14:12:12", "likes": 16} - {"index":{"_id": "17"}} - {"user": "test", "message": "some message with the number 17", "date": "2009-11-15T14:12:12", "likes": 17} - {"index":{"_id": "18"}} - {"user": "test", "message": "some message with the number 18", "date": "2009-11-15T14:12:12", "likes": 18} - {"index":{"_id": "19"}} - {"user": "test", "message": "some message with the number 19", "date": "2009-11-15T14:12:12", "likes": 19} - {"index":{"_id": "20"}} - {"user": "test", "message": "some message with the number 20", "date": "2009-11-15T14:12:12", "likes": 20} - {"index":{"_id": "21"}} - {"user": "test", "message": "some message with the number 21", "date": "2009-11-15T14:12:12", "likes": 21} - {"index":{"_id": "22"}} - {"user": "test", "message": "some message with the number 22", "date": "2009-11-15T14:12:12", "likes": 22} - {"index":{"_id": "23"}} - {"user": "test", "message": "some message with the number 23", "date": "2009-11-15T14:12:12", "likes": 23} - {"index":{"_id": "24"}} - {"user": "test", "message": "some message with the number 24", "date": "2009-11-15T14:12:12", "likes": 24} - {"index":{"_id": "25"}} - {"user": "test", "message": "some message with the number 25", "date": "2009-11-15T14:12:12", "likes": 25} - {"index":{"_id": "26"}} - {"user": 
"test", "message": "some message with the number 26", "date": "2009-11-15T14:12:12", "likes": 26} - {"index":{"_id": "27"}} - {"user": "test", "message": "some message with the number 27", "date": "2009-11-15T14:12:12", "likes": 27} - {"index":{"_id": "28"}} - {"user": "test", "message": "some message with the number 28", "date": "2009-11-15T14:12:12", "likes": 28} - {"index":{"_id": "29"}} - {"user": "test", "message": "some message with the number 29", "date": "2009-11-15T14:12:12", "likes": 29} - {"index":{"_id": "30"}} - {"user": "test", "message": "some message with the number 30", "date": "2009-11-15T14:12:12", "likes": 30} - {"index":{"_id": "31"}} - {"user": "test", "message": "some message with the number 31", "date": "2009-11-15T14:12:12", "likes": 31} - {"index":{"_id": "32"}} - {"user": "test", "message": "some message with the number 32", "date": "2009-11-15T14:12:12", "likes": 32} - {"index":{"_id": "33"}} - {"user": "test", "message": "some message with the number 33", "date": "2009-11-15T14:12:12", "likes": 33} - {"index":{"_id": "34"}} - {"user": "test", "message": "some message with the number 34", "date": "2009-11-15T14:12:12", "likes": 34} - {"index":{"_id": "35"}} - {"user": "test", "message": "some message with the number 35", "date": "2009-11-15T14:12:12", "likes": 35} - {"index":{"_id": "36"}} - {"user": "test", "message": "some message with the number 36", "date": "2009-11-15T14:12:12", "likes": 36} - {"index":{"_id": "37"}} - {"user": "test", "message": "some message with the number 37", "date": "2009-11-15T14:12:12", "likes": 37} - {"index":{"_id": "38"}} - {"user": "test", "message": "some message with the number 38", "date": "2009-11-15T14:12:12", "likes": 38} - {"index":{"_id": "39"}} - {"user": "test", "message": "some message with the number 39", "date": "2009-11-15T14:12:12", "likes": 39} - {"index":{"_id": "40"}} - {"user": "test", "message": "some message with the number 40", "date": "2009-11-15T14:12:12", "likes": 40} - 
{"index":{"_id": "41"}} - {"user": "test", "message": "some message with the number 41", "date": "2009-11-15T14:12:12", "likes": 41} - {"index":{"_id": "42"}} - {"user": "test", "message": "some message with the number 42", "date": "2009-11-15T14:12:12", "likes": 42} - {"index":{"_id": "43"}} - {"user": "test", "message": "some message with the number 43", "date": "2009-11-15T14:12:12", "likes": 43} - {"index":{"_id": "44"}} - {"user": "test", "message": "some message with the number 44", "date": "2009-11-15T14:12:12", "likes": 44} - {"index":{"_id": "45"}} - {"user": "test", "message": "some message with the number 45", "date": "2009-11-15T14:12:12", "likes": 45} - {"index":{"_id": "46"}} - {"user": "test", "message": "some message with the number 46", "date": "2009-11-15T14:12:12", "likes": 46} - {"index":{"_id": "47"}} - {"user": "test", "message": "some message with the number 47", "date": "2009-11-15T14:12:12", "likes": 47} - {"index":{"_id": "48"}} - {"user": "test", "message": "some message with the number 48", "date": "2009-11-15T14:12:12", "likes": 48} - {"index":{"_id": "49"}} - {"user": "test", "message": "some message with the number 49", "date": "2009-11-15T14:12:12", "likes": 49} - {"index":{"_id": "50"}} - {"user": "test", "message": "some message with the number 50", "date": "2009-11-15T14:12:12", "likes": 50} - {"index":{"_id": "51"}} - {"user": "test", "message": "some message with the number 51", "date": "2009-11-15T14:12:12", "likes": 51} - {"index":{"_id": "52"}} - {"user": "test", "message": "some message with the number 52", "date": "2009-11-15T14:12:12", "likes": 52} - {"index":{"_id": "53"}} - {"user": "test", "message": "some message with the number 53", "date": "2009-11-15T14:12:12", "likes": 53} - {"index":{"_id": "54"}} - {"user": "test", "message": "some message with the number 54", "date": "2009-11-15T14:12:12", "likes": 54} - {"index":{"_id": "55"}} - {"user": "test", "message": "some message with the number 55", "date": 
"2009-11-15T14:12:12", "likes": 55} - {"index":{"_id": "56"}} - {"user": "test", "message": "some message with the number 56", "date": "2009-11-15T14:12:12", "likes": 56} - {"index":{"_id": "57"}} - {"user": "test", "message": "some message with the number 57", "date": "2009-11-15T14:12:12", "likes": 57} - {"index":{"_id": "58"}} - {"user": "test", "message": "some message with the number 58", "date": "2009-11-15T14:12:12", "likes": 58} - {"index":{"_id": "59"}} - {"user": "test", "message": "some message with the number 59", "date": "2009-11-15T14:12:12", "likes": 59} - {"index":{"_id": "60"}} - {"user": "test", "message": "some message with the number 60", "date": "2009-11-15T14:12:12", "likes": 60} - {"index":{"_id": "61"}} - {"user": "test", "message": "some message with the number 61", "date": "2009-11-15T14:12:12", "likes": 61} - {"index":{"_id": "62"}} - {"user": "test", "message": "some message with the number 62", "date": "2009-11-15T14:12:12", "likes": 62} - {"index":{"_id": "63"}} - {"user": "test", "message": "some message with the number 63", "date": "2009-11-15T14:12:12", "likes": 63} - {"index":{"_id": "64"}} - {"user": "test", "message": "some message with the number 64", "date": "2009-11-15T14:12:12", "likes": 64} - {"index":{"_id": "65"}} - {"user": "test", "message": "some message with the number 65", "date": "2009-11-15T14:12:12", "likes": 65} - {"index":{"_id": "66"}} - {"user": "test", "message": "some message with the number 66", "date": "2009-11-15T14:12:12", "likes": 66} - {"index":{"_id": "67"}} - {"user": "test", "message": "some message with the number 67", "date": "2009-11-15T14:12:12", "likes": 67} - {"index":{"_id": "68"}} - {"user": "test", "message": "some message with the number 68", "date": "2009-11-15T14:12:12", "likes": 68} - {"index":{"_id": "69"}} - {"user": "test", "message": "some message with the number 69", "date": "2009-11-15T14:12:12", "likes": 69} - {"index":{"_id": "70"}} - {"user": "test", "message": "some message 
with the number 70", "date": "2009-11-15T14:12:12", "likes": 70} - {"index":{"_id": "71"}} - {"user": "test", "message": "some message with the number 71", "date": "2009-11-15T14:12:12", "likes": 71} - {"index":{"_id": "72"}} - {"user": "test", "message": "some message with the number 72", "date": "2009-11-15T14:12:12", "likes": 72} - {"index":{"_id": "73"}} - {"user": "test", "message": "some message with the number 73", "date": "2009-11-15T14:12:12", "likes": 73} - {"index":{"_id": "74"}} - {"user": "test", "message": "some message with the number 74", "date": "2009-11-15T14:12:12", "likes": 74} - {"index":{"_id": "75"}} - {"user": "test", "message": "some message with the number 75", "date": "2009-11-15T14:12:12", "likes": 75} - {"index":{"_id": "76"}} - {"user": "test", "message": "some message with the number 76", "date": "2009-11-15T14:12:12", "likes": 76} - {"index":{"_id": "77"}} - {"user": "test", "message": "some message with the number 77", "date": "2009-11-15T14:12:12", "likes": 77} - {"index":{"_id": "78"}} - {"user": "test", "message": "some message with the number 78", "date": "2009-11-15T14:12:12", "likes": 78} - {"index":{"_id": "79"}} - {"user": "test", "message": "some message with the number 79", "date": "2009-11-15T14:12:12", "likes": 79} - {"index":{"_id": "80"}} - {"user": "test", "message": "some message with the number 80", "date": "2009-11-15T14:12:12", "likes": 80} - {"index":{"_id": "81"}} - {"user": "test", "message": "some message with the number 81", "date": "2009-11-15T14:12:12", "likes": 81} - {"index":{"_id": "82"}} - {"user": "test", "message": "some message with the number 82", "date": "2009-11-15T14:12:12", "likes": 82} - {"index":{"_id": "83"}} - {"user": "test", "message": "some message with the number 83", "date": "2009-11-15T14:12:12", "likes": 83} - {"index":{"_id": "84"}} - {"user": "test", "message": "some message with the number 84", "date": "2009-11-15T14:12:12", "likes": 84} - {"index":{"_id": "85"}} - {"user": "test", 
"message": "some message with the number 85", "date": "2009-11-15T14:12:12", "likes": 85} - {"index":{"_id": "86"}} - {"user": "test", "message": "some message with the number 86", "date": "2009-11-15T14:12:12", "likes": 86} - {"index":{"_id": "87"}} - {"user": "test", "message": "some message with the number 87", "date": "2009-11-15T14:12:12", "likes": 87} - {"index":{"_id": "88"}} - {"user": "test", "message": "some message with the number 88", "date": "2009-11-15T14:12:12", "likes": 88} - {"index":{"_id": "89"}} - {"user": "test", "message": "some message with the number 89", "date": "2009-11-15T14:12:12", "likes": 89} - {"index":{"_id": "90"}} - {"user": "test", "message": "some message with the number 90", "date": "2009-11-15T14:12:12", "likes": 90} - {"index":{"_id": "91"}} - {"user": "test", "message": "some message with the number 91", "date": "2009-11-15T14:12:12", "likes": 91} - {"index":{"_id": "92"}} - {"user": "test", "message": "some message with the number 92", "date": "2009-11-15T14:12:12", "likes": 92} - {"index":{"_id": "93"}} - {"user": "test", "message": "some message with the number 93", "date": "2009-11-15T14:12:12", "likes": 93} - {"index":{"_id": "94"}} - {"user": "test", "message": "some message with the number 94", "date": "2009-11-15T14:12:12", "likes": 94} - {"index":{"_id": "95"}} - {"user": "test", "message": "some message with the number 95", "date": "2009-11-15T14:12:12", "likes": 95} - {"index":{"_id": "96"}} - {"user": "test", "message": "some message with the number 96", "date": "2009-11-15T14:12:12", "likes": 96} - {"index":{"_id": "97"}} - {"user": "test", "message": "some message with the number 97", "date": "2009-11-15T14:12:12", "likes": 97} - {"index":{"_id": "98"}} - {"user": "test", "message": "some message with the number 98", "date": "2009-11-15T14:12:12", "likes": 98} - {"index":{"_id": "99"}} - {"user": "test", "message": "some message with the number 99", "date": "2009-11-15T14:12:12", "likes": 99} - {"index":{"_id": 
"100"}} - {"user": "test", "message": "some message with the number 100", "date": "2009-11-15T14:12:12", "likes": 100} - {"index":{"_id": "101"}} - {"user": "test", "message": "some message with the number 101", "date": "2009-11-15T14:12:12", "likes": 101} - {"index":{"_id": "102"}} - {"user": "test", "message": "some message with the number 102", "date": "2009-11-15T14:12:12", "likes": 102} - {"index":{"_id": "103"}} - {"user": "test", "message": "some message with the number 103", "date": "2009-11-15T14:12:12", "likes": 103} - {"index":{"_id": "104"}} - {"user": "test", "message": "some message with the number 104", "date": "2009-11-15T14:12:12", "likes": 104} - {"index":{"_id": "105"}} - {"user": "test", "message": "some message with the number 105", "date": "2009-11-15T14:12:12", "likes": 105} - {"index":{"_id": "106"}} - {"user": "test", "message": "some message with the number 106", "date": "2009-11-15T14:12:12", "likes": 106} - {"index":{"_id": "107"}} - {"user": "test", "message": "some message with the number 107", "date": "2009-11-15T14:12:12", "likes": 107} - {"index":{"_id": "108"}} - {"user": "test", "message": "some message with the number 108", "date": "2009-11-15T14:12:12", "likes": 108} - {"index":{"_id": "109"}} - {"user": "test", "message": "some message with the number 109", "date": "2009-11-15T14:12:12", "likes": 109} - {"index":{"_id": "110"}} - {"user": "test", "message": "some message with the number 110", "date": "2009-11-15T14:12:12", "likes": 110} - {"index":{"_id": "111"}} - {"user": "test", "message": "some message with the number 111", "date": "2009-11-15T14:12:12", "likes": 111} - {"index":{"_id": "112"}} - {"user": "test", "message": "some message with the number 112", "date": "2009-11-15T14:12:12", "likes": 112} - {"index":{"_id": "113"}} - {"user": "test", "message": "some message with the number 113", "date": "2009-11-15T14:12:12", "likes": 113} - {"index":{"_id": "114"}} - {"user": "test", "message": "some message with the number 
114", "date": "2009-11-15T14:12:12", "likes": 114} - {"index":{"_id": "115"}} - {"user": "test", "message": "some message with the number 115", "date": "2009-11-15T14:12:12", "likes": 115} - {"index":{"_id": "116"}} - {"user": "test", "message": "some message with the number 116", "date": "2009-11-15T14:12:12", "likes": 116} - {"index":{"_id": "117"}} - {"user": "test", "message": "some message with the number 117", "date": "2009-11-15T14:12:12", "likes": 117} - {"index":{"_id": "118"}} - {"user": "test", "message": "some message with the number 118", "date": "2009-11-15T14:12:12", "likes": 118} - {"index":{"_id": "119"}} - {"user": "test", "message": "some message with the number 119", "date": "2009-11-15T14:12:12", "likes": 119} -emit snippet -query part: conflicts=proceed -handle snippet -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -query part: conflicts=proceed -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - 
{"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -query part: conflicts=proceed -body part: -{ - "query": { - "term": { - "user": "kimchy" - } - } -} -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -body part: -{ - "script": { - "inline": "ctx._source.likes++" - }, - "query": { - "term": { - "user": "kimchy" - } - } -} -handle snippet -test snippet -emit snippet -emit snippet -emit snippet -query part: wait_for_status=yellow -emit snippet -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 
2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -query part: routing=1 -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -query part: scroll_size=100 -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -body part: -{ - "description" : "sets foo", - "processors" : [ { - "set" : { - "field": "foo", - "value": "bar" - } 
- } ] -} -emit snippet -query part: pipeline=set-foo -handle snippet -handle snippet -test snippet -emit snippet -query part: detailed=true&action=*byquery -handle snippet -handle snippet -test snippet -emit snippet -handle snippet -test snippet -emit snippet -query part: requests_per_second=unlimited -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "test": { - "dynamic": false, - "properties": { - "text": {"type": "text"} - } - } - } -} -emit snippet -query part: refresh -body part: -{ - "text": "words words", - "flag": "bar" -} -emit snippet -query part: refresh -body part: -{ - "text": "words words", - "flag": "foo" -} -emit snippet -body part: -{ - "properties": { - "text": {"type": "text"}, - "flag": {"type": "text", "analyzer": "keyword"} - } -} -handle snippet -test snippet -emit snippet -query part: filter_path=hits.total -body part: -{ - "query": { - "match": { - "flag": "foo" - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -query part: refresh&conflicts=proceed -emit snippet -query part: filter_path=hits.total -body part: -{ - "query": { - "match": { - "flag": "foo" - } - } -} -handle snippet -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} - {"index":{"_id": "5"}} - {"user": "test", "message": "some 
message with the number 5", "date": "2009-11-15T14:12:12", "likes": 5} - {"index":{"_id": "6"}} - {"user": "test", "message": "some message with the number 6", "date": "2009-11-15T14:12:12", "likes": 6} - {"index":{"_id": "7"}} - {"user": "test", "message": "some message with the number 7", "date": "2009-11-15T14:12:12", "likes": 7} - {"index":{"_id": "8"}} - {"user": "test", "message": "some message with the number 8", "date": "2009-11-15T14:12:12", "likes": 8} - {"index":{"_id": "9"}} - {"user": "test", "message": "some message with the number 9", "date": "2009-11-15T14:12:12", "likes": 9} - {"index":{"_id": "10"}} - {"user": "test", "message": "some message with the number 10", "date": "2009-11-15T14:12:12", "likes": 10} - {"index":{"_id": "11"}} - {"user": "test", "message": "some message with the number 11", "date": "2009-11-15T14:12:12", "likes": 11} - {"index":{"_id": "12"}} - {"user": "test", "message": "some message with the number 12", "date": "2009-11-15T14:12:12", "likes": 12} - {"index":{"_id": "13"}} - {"user": "test", "message": "some message with the number 13", "date": "2009-11-15T14:12:12", "likes": 13} - {"index":{"_id": "14"}} - {"user": "test", "message": "some message with the number 14", "date": "2009-11-15T14:12:12", "likes": 14} - {"index":{"_id": "15"}} - {"user": "test", "message": "some message with the number 15", "date": "2009-11-15T14:12:12", "likes": 15} - {"index":{"_id": "16"}} - {"user": "test", "message": "some message with the number 16", "date": "2009-11-15T14:12:12", "likes": 16} - {"index":{"_id": "17"}} - {"user": "test", "message": "some message with the number 17", "date": "2009-11-15T14:12:12", "likes": 17} - {"index":{"_id": "18"}} - {"user": "test", "message": "some message with the number 18", "date": "2009-11-15T14:12:12", "likes": 18} - {"index":{"_id": "19"}} - {"user": "test", "message": "some message with the number 19", "date": "2009-11-15T14:12:12", "likes": 19} - {"index":{"_id": "20"}} - {"user": "test", 
"message": "some message with the number 20", "date": "2009-11-15T14:12:12", "likes": 20} - {"index":{"_id": "21"}} - {"user": "test", "message": "some message with the number 21", "date": "2009-11-15T14:12:12", "likes": 21} - {"index":{"_id": "22"}} - {"user": "test", "message": "some message with the number 22", "date": "2009-11-15T14:12:12", "likes": 22} - {"index":{"_id": "23"}} - {"user": "test", "message": "some message with the number 23", "date": "2009-11-15T14:12:12", "likes": 23} - {"index":{"_id": "24"}} - {"user": "test", "message": "some message with the number 24", "date": "2009-11-15T14:12:12", "likes": 24} - {"index":{"_id": "25"}} - {"user": "test", "message": "some message with the number 25", "date": "2009-11-15T14:12:12", "likes": 25} - {"index":{"_id": "26"}} - {"user": "test", "message": "some message with the number 26", "date": "2009-11-15T14:12:12", "likes": 26} - {"index":{"_id": "27"}} - {"user": "test", "message": "some message with the number 27", "date": "2009-11-15T14:12:12", "likes": 27} - {"index":{"_id": "28"}} - {"user": "test", "message": "some message with the number 28", "date": "2009-11-15T14:12:12", "likes": 28} - {"index":{"_id": "29"}} - {"user": "test", "message": "some message with the number 29", "date": "2009-11-15T14:12:12", "likes": 29} - {"index":{"_id": "30"}} - {"user": "test", "message": "some message with the number 30", "date": "2009-11-15T14:12:12", "likes": 30} - {"index":{"_id": "31"}} - {"user": "test", "message": "some message with the number 31", "date": "2009-11-15T14:12:12", "likes": 31} - {"index":{"_id": "32"}} - {"user": "test", "message": "some message with the number 32", "date": "2009-11-15T14:12:12", "likes": 32} - {"index":{"_id": "33"}} - {"user": "test", "message": "some message with the number 33", "date": "2009-11-15T14:12:12", "likes": 33} - {"index":{"_id": "34"}} - {"user": "test", "message": "some message with the number 34", "date": "2009-11-15T14:12:12", "likes": 34} - {"index":{"_id": 
"35"}} - {"user": "test", "message": "some message with the number 35", "date": "2009-11-15T14:12:12", "likes": 35} - {"index":{"_id": "36"}} - {"user": "test", "message": "some message with the number 36", "date": "2009-11-15T14:12:12", "likes": 36} - {"index":{"_id": "37"}} - {"user": "test", "message": "some message with the number 37", "date": "2009-11-15T14:12:12", "likes": 37} - {"index":{"_id": "38"}} - {"user": "test", "message": "some message with the number 38", "date": "2009-11-15T14:12:12", "likes": 38} - {"index":{"_id": "39"}} - {"user": "test", "message": "some message with the number 39", "date": "2009-11-15T14:12:12", "likes": 39} - {"index":{"_id": "40"}} - {"user": "test", "message": "some message with the number 40", "date": "2009-11-15T14:12:12", "likes": 40} - {"index":{"_id": "41"}} - {"user": "test", "message": "some message with the number 41", "date": "2009-11-15T14:12:12", "likes": 41} - {"index":{"_id": "42"}} - {"user": "test", "message": "some message with the number 42", "date": "2009-11-15T14:12:12", "likes": 42} - {"index":{"_id": "43"}} - {"user": "test", "message": "some message with the number 43", "date": "2009-11-15T14:12:12", "likes": 43} - {"index":{"_id": "44"}} - {"user": "test", "message": "some message with the number 44", "date": "2009-11-15T14:12:12", "likes": 44} - {"index":{"_id": "45"}} - {"user": "test", "message": "some message with the number 45", "date": "2009-11-15T14:12:12", "likes": 45} - {"index":{"_id": "46"}} - {"user": "test", "message": "some message with the number 46", "date": "2009-11-15T14:12:12", "likes": 46} - {"index":{"_id": "47"}} - {"user": "test", "message": "some message with the number 47", "date": "2009-11-15T14:12:12", "likes": 47} - {"index":{"_id": "48"}} - {"user": "test", "message": "some message with the number 48", "date": "2009-11-15T14:12:12", "likes": 48} - {"index":{"_id": "49"}} - {"user": "test", "message": "some message with the number 49", "date": "2009-11-15T14:12:12", 
"likes": 49} - {"index":{"_id": "50"}} - {"user": "test", "message": "some message with the number 50", "date": "2009-11-15T14:12:12", "likes": 50} - {"index":{"_id": "51"}} - {"user": "test", "message": "some message with the number 51", "date": "2009-11-15T14:12:12", "likes": 51} - {"index":{"_id": "52"}} - {"user": "test", "message": "some message with the number 52", "date": "2009-11-15T14:12:12", "likes": 52} - {"index":{"_id": "53"}} - {"user": "test", "message": "some message with the number 53", "date": "2009-11-15T14:12:12", "likes": 53} - {"index":{"_id": "54"}} - {"user": "test", "message": "some message with the number 54", "date": "2009-11-15T14:12:12", "likes": 54} - {"index":{"_id": "55"}} - {"user": "test", "message": "some message with the number 55", "date": "2009-11-15T14:12:12", "likes": 55} - {"index":{"_id": "56"}} - {"user": "test", "message": "some message with the number 56", "date": "2009-11-15T14:12:12", "likes": 56} - {"index":{"_id": "57"}} - {"user": "test", "message": "some message with the number 57", "date": "2009-11-15T14:12:12", "likes": 57} - {"index":{"_id": "58"}} - {"user": "test", "message": "some message with the number 58", "date": "2009-11-15T14:12:12", "likes": 58} - {"index":{"_id": "59"}} - {"user": "test", "message": "some message with the number 59", "date": "2009-11-15T14:12:12", "likes": 59} - {"index":{"_id": "60"}} - {"user": "test", "message": "some message with the number 60", "date": "2009-11-15T14:12:12", "likes": 60} - {"index":{"_id": "61"}} - {"user": "test", "message": "some message with the number 61", "date": "2009-11-15T14:12:12", "likes": 61} - {"index":{"_id": "62"}} - {"user": "test", "message": "some message with the number 62", "date": "2009-11-15T14:12:12", "likes": 62} - {"index":{"_id": "63"}} - {"user": "test", "message": "some message with the number 63", "date": "2009-11-15T14:12:12", "likes": 63} - {"index":{"_id": "64"}} - {"user": "test", "message": "some message with the number 64", 
"date": "2009-11-15T14:12:12", "likes": 64} - {"index":{"_id": "65"}} - {"user": "test", "message": "some message with the number 65", "date": "2009-11-15T14:12:12", "likes": 65} - {"index":{"_id": "66"}} - {"user": "test", "message": "some message with the number 66", "date": "2009-11-15T14:12:12", "likes": 66} - {"index":{"_id": "67"}} - {"user": "test", "message": "some message with the number 67", "date": "2009-11-15T14:12:12", "likes": 67} - {"index":{"_id": "68"}} - {"user": "test", "message": "some message with the number 68", "date": "2009-11-15T14:12:12", "likes": 68} - {"index":{"_id": "69"}} - {"user": "test", "message": "some message with the number 69", "date": "2009-11-15T14:12:12", "likes": 69} - {"index":{"_id": "70"}} - {"user": "test", "message": "some message with the number 70", "date": "2009-11-15T14:12:12", "likes": 70} - {"index":{"_id": "71"}} - {"user": "test", "message": "some message with the number 71", "date": "2009-11-15T14:12:12", "likes": 71} - {"index":{"_id": "72"}} - {"user": "test", "message": "some message with the number 72", "date": "2009-11-15T14:12:12", "likes": 72} - {"index":{"_id": "73"}} - {"user": "test", "message": "some message with the number 73", "date": "2009-11-15T14:12:12", "likes": 73} - {"index":{"_id": "74"}} - {"user": "test", "message": "some message with the number 74", "date": "2009-11-15T14:12:12", "likes": 74} - {"index":{"_id": "75"}} - {"user": "test", "message": "some message with the number 75", "date": "2009-11-15T14:12:12", "likes": 75} - {"index":{"_id": "76"}} - {"user": "test", "message": "some message with the number 76", "date": "2009-11-15T14:12:12", "likes": 76} - {"index":{"_id": "77"}} - {"user": "test", "message": "some message with the number 77", "date": "2009-11-15T14:12:12", "likes": 77} - {"index":{"_id": "78"}} - {"user": "test", "message": "some message with the number 78", "date": "2009-11-15T14:12:12", "likes": 78} - {"index":{"_id": "79"}} - {"user": "test", "message": "some 
message with the number 79", "date": "2009-11-15T14:12:12", "likes": 79} - {"index":{"_id": "80"}} - {"user": "test", "message": "some message with the number 80", "date": "2009-11-15T14:12:12", "likes": 80} - {"index":{"_id": "81"}} - {"user": "test", "message": "some message with the number 81", "date": "2009-11-15T14:12:12", "likes": 81} - {"index":{"_id": "82"}} - {"user": "test", "message": "some message with the number 82", "date": "2009-11-15T14:12:12", "likes": 82} - {"index":{"_id": "83"}} - {"user": "test", "message": "some message with the number 83", "date": "2009-11-15T14:12:12", "likes": 83} - {"index":{"_id": "84"}} - {"user": "test", "message": "some message with the number 84", "date": "2009-11-15T14:12:12", "likes": 84} - {"index":{"_id": "85"}} - {"user": "test", "message": "some message with the number 85", "date": "2009-11-15T14:12:12", "likes": 85} - {"index":{"_id": "86"}} - {"user": "test", "message": "some message with the number 86", "date": "2009-11-15T14:12:12", "likes": 86} - {"index":{"_id": "87"}} - {"user": "test", "message": "some message with the number 87", "date": "2009-11-15T14:12:12", "likes": 87} - {"index":{"_id": "88"}} - {"user": "test", "message": "some message with the number 88", "date": "2009-11-15T14:12:12", "likes": 88} - {"index":{"_id": "89"}} - {"user": "test", "message": "some message with the number 89", "date": "2009-11-15T14:12:12", "likes": 89} - {"index":{"_id": "90"}} - {"user": "test", "message": "some message with the number 90", "date": "2009-11-15T14:12:12", "likes": 90} - {"index":{"_id": "91"}} - {"user": "test", "message": "some message with the number 91", "date": "2009-11-15T14:12:12", "likes": 91} - {"index":{"_id": "92"}} - {"user": "test", "message": "some message with the number 92", "date": "2009-11-15T14:12:12", "likes": 92} - {"index":{"_id": "93"}} - {"user": "test", "message": "some message with the number 93", "date": "2009-11-15T14:12:12", "likes": 93} - {"index":{"_id": "94"}} - {"user": 
"test", "message": "some message with the number 94", "date": "2009-11-15T14:12:12", "likes": 94} - {"index":{"_id": "95"}} - {"user": "test", "message": "some message with the number 95", "date": "2009-11-15T14:12:12", "likes": 95} - {"index":{"_id": "96"}} - {"user": "test", "message": "some message with the number 96", "date": "2009-11-15T14:12:12", "likes": 96} - {"index":{"_id": "97"}} - {"user": "test", "message": "some message with the number 97", "date": "2009-11-15T14:12:12", "likes": 97} - {"index":{"_id": "98"}} - {"user": "test", "message": "some message with the number 98", "date": "2009-11-15T14:12:12", "likes": 98} - {"index":{"_id": "99"}} - {"user": "test", "message": "some message with the number 99", "date": "2009-11-15T14:12:12", "likes": 99} - {"index":{"_id": "100"}} - {"user": "test", "message": "some message with the number 100", "date": "2009-11-15T14:12:12", "likes": 100} - {"index":{"_id": "101"}} - {"user": "test", "message": "some message with the number 101", "date": "2009-11-15T14:12:12", "likes": 101} - {"index":{"_id": "102"}} - {"user": "test", "message": "some message with the number 102", "date": "2009-11-15T14:12:12", "likes": 102} - {"index":{"_id": "103"}} - {"user": "test", "message": "some message with the number 103", "date": "2009-11-15T14:12:12", "likes": 103} - {"index":{"_id": "104"}} - {"user": "test", "message": "some message with the number 104", "date": "2009-11-15T14:12:12", "likes": 104} - {"index":{"_id": "105"}} - {"user": "test", "message": "some message with the number 105", "date": "2009-11-15T14:12:12", "likes": 105} - {"index":{"_id": "106"}} - {"user": "test", "message": "some message with the number 106", "date": "2009-11-15T14:12:12", "likes": 106} - {"index":{"_id": "107"}} - {"user": "test", "message": "some message with the number 107", "date": "2009-11-15T14:12:12", "likes": 107} - {"index":{"_id": "108"}} - {"user": "test", "message": "some message with the number 108", "date": 
"2009-11-15T14:12:12", "likes": 108} - {"index":{"_id": "109"}} - {"user": "test", "message": "some message with the number 109", "date": "2009-11-15T14:12:12", "likes": 109} - {"index":{"_id": "110"}} - {"user": "test", "message": "some message with the number 110", "date": "2009-11-15T14:12:12", "likes": 110} - {"index":{"_id": "111"}} - {"user": "test", "message": "some message with the number 111", "date": "2009-11-15T14:12:12", "likes": 111} - {"index":{"_id": "112"}} - {"user": "test", "message": "some message with the number 112", "date": "2009-11-15T14:12:12", "likes": 112} - {"index":{"_id": "113"}} - {"user": "test", "message": "some message with the number 113", "date": "2009-11-15T14:12:12", "likes": 113} - {"index":{"_id": "114"}} - {"user": "test", "message": "some message with the number 114", "date": "2009-11-15T14:12:12", "likes": 114} - {"index":{"_id": "115"}} - {"user": "test", "message": "some message with the number 115", "date": "2009-11-15T14:12:12", "likes": 115} - {"index":{"_id": "116"}} - {"user": "test", "message": "some message with the number 116", "date": "2009-11-15T14:12:12", "likes": 116} - {"index":{"_id": "117"}} - {"user": "test", "message": "some message with the number 117", "date": "2009-11-15T14:12:12", "likes": 117} - {"index":{"_id": "118"}} - {"user": "test", "message": "some message with the number 118", "date": "2009-11-15T14:12:12", "likes": 118} - {"index":{"_id": "119"}} - {"user": "test", "message": "some message with the number 119", "date": "2009-11-15T14:12:12", "likes": 119} -emit snippet -body part: -{ - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter" - } -} -handle snippet -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the 
number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -body part: -{ - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter", - "version_type": "internal" - } -} -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -body part: -{ - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter", - "version_type": "external" - } -} -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - 
{"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -body part: -{ - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter", - "op_type": "create" - } -} -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -body part: -{ - "conflicts": "proceed", - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter", - "op_type": "create" - } -} -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", 
"message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -body part: -{ - "source": { - "index": "twitter", - "type": "tweet", - "query": { - "term": { - "user": "kimchy" - } - } - }, - "dest": { - "index": "new_twitter" - } -} -handle snippet -test snippet -emit snippet -emit snippet -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "source": { - "index": ["twitter", "blog"], - "type": ["tweet", "post"] - }, - "dest": { - "index": "all_together" - } -} -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -body part: -{ - "size": 1, - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter" - } -} -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the 
number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -body part: -{ - "size": 10000, - "source": { - "index": "twitter", - "sort": { "date": "desc" } - }, - "dest": { - "index": "new_twitter" - } -} -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -body part: -{ - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter", - "version_type": "external" - }, - "script": { - "inline": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}" - } -} -handle snippet -test snippet -emit snippet -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "source": { - "index": "source", - "query": { - "match": { - "company": "cat" - } - } - }, - "dest": { - "index": "dest", - "routing": "=cat" - } -} -handle snippet -test snippet -emit snippet -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - "source": { - "index": "source", - "size": 100 - }, - "dest": { - "index": "dest", - "routing": "=cat" - } -} -handle snippet -test snippet -emit snippet -emit snippet -query part: wait_for_status=yellow -emit snippet -body part: -{ - 
"source": { - "index": "source" - }, - "dest": { - "index": "dest", - "pipeline": "some_ingest_pipeline" - } -} -handle snippet -handle snippet -test snippet -emit snippet -query part: detailed=true&actions=*reindex -handle snippet -handle snippet -test snippet -emit snippet -handle snippet -test snippet -emit snippet -query part: requests_per_second=unlimited -handle snippet -test snippet -emit snippet -query part: refresh -body part: -{ - "text": "words words", - "flag": "foo" -} -handle snippet -test snippet -emit snippet -body part: -{ - "source": { - "index": "test" - }, - "dest": { - "index": "test2" - }, - "script": { - "inline": "ctx._source.tag = ctx._source.remove(\"flag\")" - } -} -handle snippet -test snippet -emit snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -catch part: request -query part: pipeline=my_pipeline_id -body part: -{ - "foo": "bar" -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet 
-handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "transient": { - "cluster.routing.allocation.enable": "none" - } -} -handle snippet -test snippet -emit snippet -handle snippet -test snippet -emit snippet -handle snippet -test snippet -emit snippet -body part: -{ - "transient": { - "cluster.routing.allocation.enable": "all" - } -} -handle snippet -test snippet -emit snippet -handle snippet -test snippet -emit snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "persistent": { - "cluster.routing.allocation.enable": "none" - } -} -handle snippet -test snippet -emit snippet -handle snippet -test snippet -emit snippet -emit snippet -handle snippet -test snippet -emit snippet -body part: -{ - "persistent": { - "cluster.routing.allocation.enable": "all" - } -} -handle snippet -test snippet -emit snippet -emit snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "user": { - "_all": { "enabled": false }, - "properties": { - "title": { "type": "text" }, - "name": { "type": "text" }, - "age": { "type": "integer" } - } - }, - "blogpost": { - "_all": { "enabled": false }, - "properties": { - "title": { "type": "text" }, - "body": { "type": "text" }, - "user_id": { - "type": "keyword" - }, - "created": { - "type": "date", - "format": "strict_date_optional_time||epoch_millis" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ "count": 5 } -handle snippet -test snippet -emit snippet -body part: -{ - "index.mapper.dynamic":false -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "session": { - "properties": { - "user_id": { - "type": "keyword" - }, - "last_updated": { - "type": "date" - }, - "session_data": { - "enabled": false - } - } - } - } -} 
-emit snippet -body part: -{ - "user_id": "kimchy", - "session_data": { - "arbitrary_object": { - "some_array": [ "foo", "bar", { "baz": 2 } ] - } - }, - "last_updated": "2015-12-06T18:20:22" -} -emit snippet -body part: -{ - "user_id": "jpountz", - "session_data": "none", - "last_updated": "2015-12-06T18:22:13" -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "session": { - "enabled": false - } - } -} -emit snippet -body part: -{ - "user_id": "kimchy", - "session_data": { - "arbitrary_object": { - "some_array": [ "foo", "bar", { "baz": 2 } ] - } - }, - "last_updated": "2015-12-06T18:20:22" -} -emit snippet -emit snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "location": { - "type": "geo_point", - "geohash_prefix": true, - "geohash_precision": 6 - } - } - } - } -} -emit snippet -body part: -{ - "location": { - "lat": 41.12, - "lon": -71.34 - } -} -emit snippet -query part: fielddata_fields=location.geohash -body part: -{ - "query": { - "term": { - "location.geohash": "drm3bt" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "date": { - "type": "date", - "format": "yyyy-MM-dd" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "first_name": { - "type": "text", - "copy_to": "full_name" - }, - "last_name": { - "type": "text", - "copy_to": "full_name" - }, - "full_name": { - "type": "text" - } - } - } - } -} -emit snippet -body part: -{ - "first_name": "John", - "last_name": "Smith" -} -emit snippet -body part: -{ - "query": { - "match": { - "full_name": { - "query": "John Smith", - "operator": "and" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "title": { - "type": "text", - "store": true - }, - "date": { - "type": "date", - "store": true - }, 
- "content": { - "type": "text" - } - } - } - } -} -emit snippet -body part: -{ - "title": "Some short title", - "date": "2015-01-01", - "content": "A very long content field..." -} -emit snippet -body part: -{ - "fields": [ "title", "date" ] -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "status_code": { - "type": "keyword", - "null_value": "NULL" - } - } - } - } -} -emit snippet -body part: -{ - "status_code": null -} -emit snippet -body part: -{ - "status_code": [] -} -emit snippet -body part: -{ - "query": { - "term": { - "status_code": "NULL" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "number_one": { - "type": "integer" - }, - "number_two": { - "type": "integer", - "coerce": false - } - } - } - } -} -emit snippet -body part: -{ - "number_one": "10" -} -emit snippet -catch part: request -body part: -{ - "number_two": "10" -} -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "index.mapping.coerce": false - }, - "mappings": { - "my_type": { - "properties": { - "number_one": { - "type": "integer", - "coerce": true - }, - "number_two": { - "type": "integer" - } - } - } - } -} -emit snippet -body part: -{ "number_one": "10" } -emit snippet -catch part: request -body part: -{ "number_two": "10" } -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "analysis": { - "filter": { - "autocomplete_filter": { - "type": "edge_ngram", - "min_gram": 1, - "max_gram": 20 - } - }, - "analyzer": { - "autocomplete": { - "type": "custom", - "tokenizer": "standard", - "filter": [ - "lowercase", - "autocomplete_filter" - ] - } - } - } - }, - "mappings": { - "my_type": { - "properties": { - "text": { - "type": "text", - "analyzer": "autocomplete", - "search_analyzer": "standard" - } - } - } - } -} -emit snippet -body part: -{ - "text": "Quick Brown Fox" -} -emit snippet -body part: -{ - 
"query": { - "match": { - "text": { - "query": "Quick Br", - "operator": "and" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "text": { - "type": "text", - "term_vector": "with_positions_offsets" - } - } - } - } -} -emit snippet -body part: -{ - "text": "Quick brown fox" -} -emit snippet -body part: -{ - "query": { - "match": { - "text": "brown fox" - } - }, - "highlight": { - "fields": { - "text": {} - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "city": { - "type": "text", - "fields": { - "raw": { - "type": "keyword" - } - } - } - } - } - } -} -emit snippet -body part: -{ - "city": "New York" -} -emit snippet -body part: -{ - "city": "York" -} -emit snippet -body part: -{ - "query": { - "match": { - "city": "york" - } - }, - "sort": { - "city.raw": "asc" - }, - "aggs": { - "Cities": { - "terms": { - "field": "city.raw" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "text": { - "type": "text", - "fields": { - "english": { - "type": "text", - "analyzer": "english" - } - } - } - } - } - } -} -emit snippet -body part: -{ "text": "quick brown fox" } -emit snippet -body part: -{ "text": "quick brown foxes" } -emit snippet -body part: -{ - "query": { - "multi_match": { - "query": "quick brown foxes", - "fields": [ - "text", - "text.english" - ], - "type": "most_fields" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "tag": { - "type": "text", - "fielddata": true, - "fielddata_frequency_filter": { - "min": 0.001, - "max": 0.1, - "min_segment_size": 500 - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "number_one": { - "type": "integer", - "ignore_malformed": true - }, - "number_two": { - 
"type": "integer" - } - } - } - } -} -emit snippet -body part: -{ - "text": "Some text value", - "number_one": "foo" -} -emit snippet -catch part: request -body part: -{ - "text": "Some text value", - "number_two": "foo" -} -handle snippet -test snippet -emit snippet -body part: -{ - "settings": { - "index.mapping.ignore_malformed": true - }, - "mappings": { - "my_type": { - "properties": { - "number_one": { - "type": "byte" - }, - "number_two": { - "type": "integer", - "ignore_malformed": false - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "username": "johnsmith", - "name": { - "first": "John", - "last": "Smith" - } -} -emit snippet -emit snippet -body part: -{ - "username": "marywhite", - "email": "mary@white.com", - "name": { - "first": "Mary", - "middle": "Alice", - "last": "White" - } -} -emit snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "dynamic": false, - "properties": { - "user": { - "properties": { - "name": { - "type": "text" - }, - "social_networks": { - "dynamic": true, - "properties": {} - } - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "names": [ "John Abraham", "Lincoln Smith"] -} -emit snippet -body part: -{ - "query": { - "match_phrase": { - "names": { - "query": "Abraham Lincoln" - } - } - } -} -emit snippet -body part: -{ - "query": { - "match_phrase": { - "names": { - "query": "Abraham Lincoln", - "slop": 101 - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "groups": { - "properties": { - "names": { - "type": "text", - "position_increment_gap": 0 - } - } - } - } -} -emit snippet -body part: -{ - "names": [ "John Abraham", "Lincoln Smith"] -} -emit snippet -body part: -{ - "query": { - "match_phrase": { - "names": "Abraham Lincoln" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "message": { - "type": 
"keyword", - "ignore_above": 20 - } - } - } - } -} -emit snippet -body part: -{ - "message": "Syntax error" -} -emit snippet -body part: -{ - "message": "Syntax error with some long stacktrace" -} -emit snippet -body part: -{ - "aggs": { - "messages": { - "terms": { - "field": "message" - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "location": { - "type": "geo_point", - "lat_lon": true - } - } - } - } -} -emit snippet -body part: -{ - "location": { - "lat": 41.12, - "lon": -71.34 - } -} -emit snippet -body part: -{ - "query": { - "geo_distance": { - "location": { - "lat": 41, - "lon": -71 - }, - "distance": "50km", - "optimize_bbox": "indexed" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "location": { - "type": "geo_point", - "geohash_prefix": true, - "geohash_precision": 6 - } - } - } - } -} -emit snippet -body part: -{ - "location": { - "lat": 41.12, - "lon": -71.34 - } -} -emit snippet -query part: fielddata_fields=location.geohash -body part: -{ - "query": { - "geohash_cell": { - "location": { - "lat": 41.02, - "lon": -71.48 - }, - "precision": 4, - "neighbors": true - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "status_code": { - "type": "keyword" - }, - "session_id": { - "type": "keyword", - "doc_values": false - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "text": { - "type": "text", - "index_options": "offsets" - } - } - } - } -} -emit snippet -body part: -{ - "text": "Quick brown fox" -} -emit snippet -body part: -{ - "query": { - "match": { - "text": "brown fox" - } - }, - "highlight": { - "fields": { - "text": {} - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "manager": { - 
"properties": { - "age": { "type": "integer" }, - "name": { "type": "text" } - } - }, - "employees": { - "type": "nested", - "properties": { - "age": { "type": "integer" }, - "name": { "type": "text" } - } - } - } - } - } -} -emit snippet -body part: -{ - "region": "US", - "manager": { - "name": "Alice White", - "age": 30 - }, - "employees": [ - { - "name": "John Smith", - "age": 34 - }, - { - "name": "Peter Brown", - "age": 26 - } - ] -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match": { - "manager.name": "Alice White" - } - }, - "aggs": { - "Employees": { - "nested": { - "path": "employees" - }, - "aggs": { - "Employee Ages": { - "histogram": { - "field": "employees.age", - "interval": 5 - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "title": { - "type": "text" - }, - "content": { - "type": "text" - }, - "date": { - "type": "date", - "include_in_all": false - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "include_in_all": false, - "properties": { - "title": { "type": "text" }, - "author": { - "include_in_all": true, - "properties": { - "first_name": { "type": "text" }, - "last_name": { "type": "text" } - } - }, - "editor": { - "properties": { - "first_name": { "type": "text" }, - "last_name": { "type": "text", "include_in_all": true } - } - } - } - } - } -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -emit snippet -body part: -{ - "properties": { - "title": { - "type": "text", - "norms": false - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "title": { - "type": "text", - "boost": 2 - }, - "content": { - "type": "text" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match" : { - "title": { - "query": "quick brown fox" - } - } - } -} 
-handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "match" : { - "title": { - "query": "quick brown fox", - "boost": 2 - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "default_field": { - "type": "text" - }, - "bm25_field": { - "type": "text", - "similarity": "BM25" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "location": { - "type": "geo_point", - "geohash": true - } - } - } - } -} -emit snippet -body part: -{ - "location": { - "lat": 41.12, - "lon": -71.34 - } -} -emit snippet -query part: fielddata_fields=location.geohash -body part: -{ - "query": { - "prefix": { - "location.geohash": "drm3b" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "text": { - "type": "text", - "fields": { - "english": { - "type": "text", - "analyzer": "english" - } - } - } - } - } - } -} -emit snippet -query part: wait_for_status=yellow -emit snippet -query part: field=text -body part: -{ - "text": "The quick Brown Foxes." -} -emit snippet -query part: field=text.english -body part: -{ - "text": "The quick Brown Foxes." 
-} -handle snippet -test snippet -emit snippet -body part: -{ - "settings":{ - "analysis":{ - "analyzer":{ - "my_analyzer":{ - "type":"custom", - "tokenizer":"standard", - "filter":[ - "lowercase" - ] - }, - "my_stop_analyzer":{ - "type":"custom", - "tokenizer":"standard", - "filter":[ - "lowercase", - "english_stop" - ] - } - }, - "filter":{ - "english_stop":{ - "type":"stop", - "stopwords":"_english_" - } - } - } - }, - "mappings":{ - "my_type":{ - "properties":{ - "title": { - "type":"text", - "analyzer":"my_analyzer", - "search_analyzer":"my_stop_analyzer", - "search_quote_analyzer":"my_analyzer" - } - } - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "region": "US", - "manager": { - "age": 30, - "name": { - "first": "John", - "last": "Smith" - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "region": { - "type": "keyword" - }, - "manager": { - "properties": { - "age": { "type": "integer" }, - "name": { - "properties": { - "first": { "type": "text" }, - "last": { "type": "text" } - } - } - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "date": { - "type": "date" - } - } - } - } -} -emit snippet -body part: -{ "date": "2015-01-01" } -emit snippet -body part: -{ "date": "2015-01-01T12:10:30Z" } -emit snippet -body part: -{ "date": 1420070400001 } -emit snippet -body part: -{ - "sort": { "date": "asc"} -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "date": { - "type": "date", - "format": "yyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "full_name": { - "type": "text" - } - } - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - 
"mappings": { - "my_type": { - "properties": { - "name": { - "type": "text", - "fields": { - "length": { - "type": "token_count", - "analyzer": "standard" - } - } - } - } - } - } -} -emit snippet -body part: -{ "name": "John Smith" } -emit snippet -body part: -{ "name": "Rachel Alice Williams" } -emit snippet -body part: -{ - "query": { - "term": { - "name.length": 3 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "group" : "fans", - "user" : [ - { - "first" : "John", - "last" : "Smith" - }, - { - "first" : "Alice", - "last" : "White" - } - ] -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "bool": { - "must": [ - { "match": { "user.first": "Alice" }}, - { "match": { "user.last": "Smith" }} - ] - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "user": { - "type": "nested" - } - } - } - } -} -emit snippet -body part: -{ - "group" : "fans", - "user" : [ - { - "first" : "John", - "last" : "Smith" - }, - { - "first" : "Alice", - "last" : "White" - } - ] -} -emit snippet -body part: -{ - "query": { - "nested": { - "path": "user", - "query": { - "bool": { - "must": [ - { "match": { "user.first": "Alice" }}, - { "match": { "user.last": "Smith" }} - ] - } - } - } - } -} -emit snippet -body part: -{ - "query": { - "nested": { - "path": "user", - "query": { - "bool": { - "must": [ - { "match": { "user.first": "Alice" }}, - { "match": { "user.last": "White" }} - ] - } - }, - "inner_hits": { - "highlight": { - "fields": { - "user.first": {} - } - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "is_published": { - "type": "boolean" - } - } - } - } -} -emit snippet -body part: -{ - "is_published": true -} -emit snippet -body part: -{ - "query": { - "term": { - "is_published": 1 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "is_published": 
true -} -emit snippet -body part: -{ - "is_published": false -} -emit snippet -body part: -{ - "aggs": { - "publish_state": { - "terms": { - "field": "is_published" - } - } - }, - "script_fields": { - "is_published": { - "script": "doc['is_published'].value" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "number_of_bytes": { - "type": "integer" - }, - "time_in_seconds": { - "type": "float" - } - } - } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "tags": { - "type": "keyword" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "location": { - "type": "geo_point" - } - } - } - } -} -emit snippet -body part: -{ - "text": "Geo-point as an object", - "location": { - "lat": 41.12, - "lon": -71.34 - } -} -emit snippet -body part: -{ - "text": "Geo-point as a string", - "location": "41.12,-71.34" -} -emit snippet -body part: -{ - "text": "Geo-point as a geohash", - "location": "drm3btev3e86" -} -emit snippet -body part: -{ - "text": "Geo-point as an array", - "location": [ -71.34, 41.12 ] -} -emit snippet -body part: -{ - "query": { - "geo_bounding_box": { - "location": { - "top_left": { - "lat": 42, - "lon": -72 - }, - "bottom_right": { - "lat": 40, - "lon": -74 - } - } - } - } -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "message": "some arrays in this document...", - "tags": [ "elasticsearch", "wow" ], - "lists": [ - { - "name": "prog_list", - "description": "programming list" - }, - { - "name": "cool_list", - "description": "cool stuff list" - } - ] -} -emit snippet -body part: -{ - "message": 
"no arrays in this document...", - "tags": "elasticsearch", - "lists": { - "name": "prog_list", - "description": "programming list" - } -} -emit snippet -body part: -{ - "query": { - "match": { - "tags": "elasticsearch" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "properties": { - "ip_addr": { - "type": "ip" - } - } - } - } -} -emit snippet -body part: -{ - "ip_addr": "192.168.1.1" -} -emit snippet -body part: -{ - "query": { - "term": { - "ip_addr": "192.168.0.0/16" - } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "dynamic_templates": [ - { - "integers": { - "match_mapping_type": "long", - "mapping": { - "type": "integer" - } - } - }, - { - "strings": { - "match_mapping_type": "string", - "mapping": { - "type": "text", - "fields": { - "raw": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - } - ] - } - } -} -emit snippet -body part: -{ - "my_integer": 5, - "my_string": "Some string" -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "dynamic_templates": [ - { - "longs_as_strings": { - "match_mapping_type": "string", - "match": "long_*", - "unmatch": "*_text", - "mapping": { - "type": "long" - } - } - } - ] - } - } -} -emit snippet -body part: -{ - "long_num": "5", - "long_text": "foo" -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "dynamic_templates": [ - { - "full_name": { - "path_match": "name.*", - "path_unmatch": "*.middle", - "mapping": { - "type": "text", - "copy_to": "full_name" - } - } - } - ] - } - } -} -emit snippet -body part: -{ - "name": { - "first": "Alice", - "middle": "Mary", - "last": "White" - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "dynamic_templates": [ - { - 
"named_analyzers": { - "match_mapping_type": "string", - "match": "*", - "mapping": { - "type": "text", - "analyzer": "{name}" - } - } - }, - { - "no_doc_values": { - "match_mapping_type":"*", - "mapping": { - "type": "{dynamic_type}", - "doc_values": false - } - } - } - ] - } - } -} -emit snippet -body part: -{ - "english": "Some English text", - "count": 5 -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "order": 0, - "template": "*", - "mappings": { - "_default_": { - "_all": { - "enabled": false - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "create_date": "2015/09/02" -} -emit snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "date_detection": false - } - } -} -emit snippet -body part: -{ - "create": "2015/09/02" -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "dynamic_date_formats": ["MM/dd/yyyy"] - } - } -} -emit snippet -body part: -{ - "create_date": "09/25/2015" -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "numeric_detection": true - } - } -} -emit snippet -body part: -{ - "my_float": "1.0", - "my_integer": "1" -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "_default_": { - "_all": { - "enabled": false - } - }, - "user": {}, - "blogpost": { - "_all": { - "enabled": true - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "template": "logs-*", - "settings": { "number_of_shards": 1 }, - "mappings": { - "_default_": { - "_all": { - "enabled": false - }, - "dynamic_templates": [ - { - "strings": { - "match_mapping_type": "string", - "mapping": { - "type": "text", - "fields": { - "raw": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - } - ] - } - } -} -emit snippet -body part: -{ "message": "error:16" } -handle snippet -test snippet -emit 
snippet -body part: -{ - "mappings": { - "my_type": { - "_timestamp": { - "enabled": true - } - } - } -} -emit snippet -query part: timestamp=2015-01-01 -body part: -{ "text": "Timestamp as a formatted date" } -emit snippet -query part: timestamp=1420070400000 -body part: -{ "text": "Timestamp as milliseconds since the epoch" } -emit snippet -body part: -{ "text": "Autogenerated timestamp set to now()" } -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "range": { - "_timestamp": { - "gte": "2015-01-01" - } - } - }, - "aggs": { - "Timestamps": { - "terms": { - "field": "_timestamp", - "size": 10 - } - } - }, - "sort": [ - { - "_timestamp": { - "order": "desc" - } - } - ], - "script_fields": { - "Timestamp": { - "script": "doc['_timestamp']" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "first_name": "John", - "last_name": "Smith", - "date_of_birth": "1970-10-24" -} -emit snippet -body part: -{ - "query": { - "match": { - "_all": "john smith 1970" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "query_string": { - "query": "john smith 1970" - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "type_1": { - "properties": {} - }, - "type_2": { - "_all": { - "enabled": false - }, - "properties": {} - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "_all": { - "enabled": false - }, - "properties": { - "content": { - "type": "text" - } - } - } - }, - "settings": { - "index.query.default_field": "content" - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "mytype": { - "properties": { - "title": { - "type": "text", - "boost": 2 - }, - "content": { - "type": "text" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "mytype": { - "properties": { - "first_name": { - "type": "text", - "copy_to": "full_name" - }, 
- "last_name": { - "type": "text", - "copy_to": "full_name" - }, - "full_name": { - "type": "text" - } - } - } - } -} -emit snippet -body part: -{ - "first_name": "John", - "last_name": "Smith" -} -emit snippet -body part: -{ - "query": { - "match": { - "full_name": "John Smith" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "mytype": { - "_all": { - "store": true - } - } - } -} -emit snippet -body part: -{ - "first_name": "John", - "last_name": "Smith" -} -emit snippet -body part: -{ - "query": { - "match": { - "_all": "John Smith" - } - }, - "highlight": { - "fields": { - "_all": {} - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "mytype": { - "_all": {} - } - } -} -emit snippet -body part: -{ - "first_name": "John", - "last_name": "Smith" -} -emit snippet -body part: -{ - "query": { - "match": { - "_all": "John Smith" - } - }, - "highlight": { - "fields": { - "*_name": { - "require_field_match": false - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "title": "This is a document" -} -emit snippet -body part: -{ - "title": "This is another document", - "body": "This document has a body" -} -emit snippet -body part: -{ - "query": { - "terms": { - "_field_names": [ "title" ] - } - }, - "script_fields": { - "Field names": { - "script": "doc['_field_names']" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "text": "Document in index 1" -} -emit snippet -body part: -{ - "text": "Document in index 2" -} -emit snippet -body part: -{ - "query": { - "terms": { - "_index": ["index_1", "index_2"] - } - }, - "aggs": { - "indices": { - "terms": { - "field": "_index", - "size": 10 - } - } - }, - "sort": [ - { - "_index": { - "order": "asc" - } - } - ], - "script_fields": { - "index_name": { - "script": "doc['_index']" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "tweet": { - "_source": { - "enabled": false 
- } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "event": { - "_source": { - "includes": [ - "*.count", - "meta.*" - ], - "excludes": [ - "meta.description", - "meta.other.*" - ] - } - } - } -} -emit snippet -body part: -{ - "requests": { - "count": 10, - "foo": "bar" - }, - "meta": { - "name": "Some metric", - "description": "Some metric description", - "other": { - "foo": "one", - "baz": "two" - } - } -} -emit snippet -body part: -{ - "query": { - "match": { - "meta.other.foo": "one" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "text": "Document with type 1" -} -emit snippet -body part: -{ - "text": "Document with type 2" -} -emit snippet -body part: -{ - "query": { - "terms": { - "_type": [ "type_1", "type_2" ] - } - }, - "aggs": { - "types": { - "terms": { - "field": "_type", - "size": 10 - } - } - }, - "sort": [ - { - "_type": { - "order": "desc" - } - } - ], - "script_fields": { - "type": { - "script": "doc['_type']" - } - } -} -handle snippet -null -emit snippet -query part: routing=user1 -body part: -{ - "title": "This is a document" -} -emit snippet -query part: routing=user1 -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "terms": { - "_routing": [ "user1" ] - } - }, - "script_fields": { - "Routing value": { - "script": "doc['_routing']" - } - } -} -handle snippet -test snippet -emit snippet -query part: routing=user1,user2 -body part: -{ - "query": { - "match": { - "title": "document" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "_routing": { - "required": true - } - } - } -} -emit snippet -catch part: request -body part: -{ - "text": "No routing value provided" -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "_ttl": { - "enabled": true - } - } - } -} -emit snippet -query part: ttl=10m -body part: -{ - "text": "Will expire in 10 minutes" -} -emit snippet 
-body part: -{ - "text": "Will not expire" -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_type": { - "_ttl": { - "enabled": true, - "default": "5m" - } - } - } -} -emit snippet -query part: ttl=10m -body part: -{ - "text": "Will expire in 10 minutes" -} -emit snippet -body part: -{ - "text": "Will expire in 5 minutes" -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_parent": {}, - "my_child": { - "_parent": { - "type": "my_parent" - } - } - } -} -emit snippet -body part: -{ - "text": "This is a parent document" -} -emit snippet -query part: parent=1 -body part: -{ - "text": "This is a child document" -} -emit snippet -query part: parent=1 -body part: -{ - "text": "This is another child document" -} -emit snippet -body part: -{ - "query": { - "has_child": { - "type": "my_child", - "query": { - "match": { - "text": "child document" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "query": { - "terms": { - "_parent": [ "1" ] - } - }, - "aggs": { - "parents": { - "terms": { - "field": "_parent", - "size": 10 - } - } - }, - "script_fields": { - "parent": { - "script": "doc['_parent']" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "my_parent": {}, - "my_child": { - "_parent": { - "type": "my_parent", - "eager_global_ordinals": true - } - } - } -} -handle snippet -test snippet -emit snippet -query part: human&fields=_parent -emit snippet -query part: human&fields=_parent -handle snippet -test snippet -emit snippet -body part: -{ - "text": "Document with ID 1" -} -emit snippet -body part: -{ - "text": "Document with ID 2" -} -emit snippet -body part: -{ - "query": { - "terms": { - "_id": [ "1", "2" ] - } - }, - "script_fields": { - "UID": { - "script": "doc['_id']" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "text": "Document with ID 1" -} -emit snippet -body part: -{ - "text": "Document with ID 2" -} 
-emit snippet -body part: -{ - "query": { - "terms": { - "_uid": [ "my_type#1", "my_type#2" ] - } - }, - "aggs": { - "UIDs": { - "terms": { - "field": "_uid", - "size": 10 - } - } - }, - "sort": [ - { - "_uid": { - "order": "desc" - } - } - ], - "script_fields": { - "UID": { - "script": "doc['_uid']" - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "user": { - "_meta": { - "class": "MyApp::User", - "version": { - "min": "1.0", - "max": "1.3" - } - } - } - } -} -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -emit snippet -emit snippet -body part: -{ - "settings": { - "index.priority": 10 - } -} -emit snippet -body part: -{ - "settings": { - "index.priority": 5 - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "index.priority": 1 -} -handle snippet -test snippet -emit snippet -emit snippet -body part: -{ - "settings": { - "index.unassigned.node_left.delayed_timeout": "5m" - } -} -handle snippet -test snippet -emit snippet -handle snippet -test snippet -emit snippet -emit snippet -body part: -{ - "settings": { - "index.unassigned.node_left.delayed_timeout": "0" - } -} -handle snippet -handle snippet -test snippet -emit snippet -emit snippet -body part: -{ - "index.routing.allocation.include.size": "big,medium" -} -handle snippet -test snippet -emit snippet -emit snippet -body part: -{ - "index.routing.allocation.exclude.size": "small" -} -handle snippet -test snippet -emit snippet -emit snippet -body part: -{ - "index.routing.allocation.include.size": "big", - "index.routing.allocation.include.rack": "rack1" -} -handle snippet -test snippet -emit snippet -body part: -{ - "index.routing.allocation.include._ip": "192.168.2.*" -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet 
-handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "tokenizer" : "standard", - "token_filter" : ["snowball"], - "text" : "detailed output", - "explain" : true, - "attributes" : ["keyword"] -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "template": "te*", - "settings": { - "number_of_shards": 1 - }, - "mappings": { - "type1": { - "_source": { - "enabled": false - }, - "properties": { - "host_name": { - "type": "keyword" - }, - "created_at": { - "type": "date", - "format": "EEE MMM dd HH:mm:ss Z YYYY" - } - } - } - } -} -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "tweet": { - "properties": { - "message": { - "type": "text" - } - } - } - } -} -emit snippet -body part: -{ - "properties": { - "name": { - "type": "text" - } - } -} -emit snippet -body part: -{ - "properties": { - "user_name": { - "type": "text" - } - } -} -handle snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "user": { - "properties": { - "name": { - "properties": { - 
"first": { - "type": "text" - } - } - }, - "user_id": { - "type": "keyword" - } - } - } - } -} -emit snippet -body part: -{ - "properties": { - "name": { - "properties": { - "last": { - "type": "text" - } - } - }, - "user_id": { - "type": "keyword", - "ignore_above": 100 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "type_one": { - "properties": { - "text": { - "type": "text", - "analyzer": "standard" - } - } - }, - "type_two": { - "properties": { - "text": { - "type": "text", - "analyzer": "standard" - } - } - } - } -} -emit snippet -catch part: request -body part: -{ - "properties": { - "text": { - "type": "text", - "analyzer": "standard", - "search_analyzer": "whitespace" - } - } -} -handle snippet -test snippet -emit snippet -query part: update_all_types -body part: -{ - "properties": { - "text": { - "type": "text", - "analyzer": "standard", - "search_analyzer": "whitespace" - } - } -} -handle snippet -test snippet -emit snippet -emit snippet -body part: -{ - "actions" : [ - { "add" : { "index" : "test1", "alias" : "alias1" } } - ] -} -handle snippet -test snippet -emit snippet -body part: -{ - "actions" : [ - { "remove" : { "index" : "test1", "alias" : "alias1" } } - ] -} -handle snippet -test snippet -emit snippet -body part: -{ - "actions" : [ - { "remove" : { "index" : "test1", "alias" : "alias1" } }, - { "add" : { "index" : "test1", "alias" : "alias2" } } - ] -} -handle snippet -test snippet -emit snippet -emit snippet -emit snippet -body part: -{ - "actions" : [ - { "add" : { "index" : "test1", "alias" : "alias1" } }, - { "add" : { "index" : "test2", "alias" : "alias1" } } - ] -} -handle snippet -test snippet -emit snippet -emit snippet -emit snippet -body part: -{ - "actions" : [ - { "add" : { "indices" : ["test1", "test2"], "alias" : "alias1" } } - ] -} -handle snippet -test snippet -emit snippet -emit snippet -emit snippet -body part: -{ - "actions" : [ - { "add" : { "index" : "test*", "alias" : 
"all_test_indices" } } - ] -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings": { - "type1": { - "properties": { - "user" : { - "type": "keyword" - } - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "actions" : [ - { - "add" : { - "index" : "test1", - "alias" : "alias2", - "filter" : { "term" : { "user" : "kimchy" } } - } - } - ] -} -handle snippet -test snippet -emit snippet -emit snippet -body part: -{ - "actions" : [ - { - "add" : { - "index" : "test", - "alias" : "alias1", - "routing" : "1" - } - } - ] -} -handle snippet -test snippet -emit snippet -emit snippet -body part: -{ - "actions" : [ - { - "add" : { - "index" : "test", - "alias" : "alias2", - "search_routing" : "1,2", - "index_routing" : "2" - } - } - ] -} -handle snippet -test snippet -emit snippet -query part: q=user:kimchy&routing=2,3 -handle snippet -test snippet -emit snippet -emit snippet -handle snippet -test snippet -emit snippet -body part: -{ - "mappings" : { - "user" : { - "properties" : { - "user_id" : {"type" : "integer"} - } - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "routing" : "12", - "filter" : { - "term" : { - "user_id" : 12 - } - } -} -handle snippet -test snippet -emit snippet -body part: -{ - "mappings" : { - "type" : { - "properties" : { - "year" : {"type" : "integer"} - } - } - }, - "aliases" : { - "current_day" : {}, - "2016" : { - "filter" : { - "term" : {"year" : 2016 } - } - } - } -} -handle snippet -test snippet -emit snippet -handle snippet -test snippet -emit snippet -handle snippet -handle snippet -test snippet -emit snippet -handle snippet -handle snippet -test snippet -emit snippet -handle snippet -handle snippet -test snippet -emit snippet -emit snippet -emit snippet -handle snippet -handle snippet -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", 
"date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -handle snippet -test snippet -emit snippet -emit snippet -emit snippet -emit snippet -handle snippet -test snippet -emit snippet -emit snippet -query part: level=shards -handle snippet -handle snippet -test snippet - - - do: - bulk: - index: twitter - type: tweet - refresh: true - body: | - {"index":{"_id": "0"}} - {"user": "kimchy", "message": "trying out Elasticsearch", "date": "2009-11-15T14:12:12", "likes": 0} - {"index":{"_id": "1"}} - {"user": "test", "message": "some message with the number 1", "date": "2009-11-15T14:12:12", "likes": 1} - {"index":{"_id": "2"}} - {"user": "test", "message": "some message with the number 2", "date": "2009-11-15T14:12:12", "likes": 2} - {"index":{"_id": "3"}} - {"user": "test", "message": "some message with the number 3", "date": "2009-11-15T14:12:12", "likes": 3} - {"index":{"_id": "4"}} - {"user": "test", "message": "some message with the number 4", "date": "2009-11-15T14:12:12", "likes": 4} -emit snippet -handle snippet -handle snippet -handle snippet -handle snippet -test snippet -emit snippet -emit snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle 
snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle 
snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -handle snippet -:core:compileJava UP-TO-DATE -:core:generateModulesList UP-TO-DATE -:core:generatePluginsList UP-TO-DATE -:core:processResources UP-TO-DATE -:core:classes UP-TO-DATE -:core:jar UP-TO-DATE -:test:framework:compileJava UP-TO-DATE -:test:framework:processResources UP-TO-DATE -:test:framework:classes UP-TO-DATE -:test:framework:jar UP-TO-DATE 
-:docs:compileTestJava -:docs:testClasses -:docs:integTest#prepareCluster.cleanShared -:docs:integTest#clean -:docs:integTest#checkPrevious SKIPPED -:docs:integTest#stopPrevious SKIPPED -:modules:aggs-matrix-stats:compileJava UP-TO-DATE -:modules:aggs-matrix-stats:processResources UP-TO-DATE -:modules:aggs-matrix-stats:classes UP-TO-DATE -:modules:aggs-matrix-stats:jar UP-TO-DATE -:modules:aggs-matrix-stats:copyPluginPropertiesTemplate -:modules:aggs-matrix-stats:pluginProperties UP-TO-DATE -:modules:aggs-matrix-stats:bundlePlugin UP-TO-DATE -:modules:ingest-grok:compileJava UP-TO-DATE -:modules:ingest-grok:processResources UP-TO-DATE -:modules:ingest-grok:classes UP-TO-DATE -:modules:ingest-grok:jar UP-TO-DATE -:modules:ingest-grok:copyPluginPropertiesTemplate -:modules:ingest-grok:pluginProperties UP-TO-DATE -:modules:ingest-grok:bundlePlugin UP-TO-DATE -:modules:lang-expression:compileJava UP-TO-DATE -:modules:lang-expression:processResources UP-TO-DATE -:modules:lang-expression:classes UP-TO-DATE -:modules:lang-expression:jar UP-TO-DATE -:modules:lang-expression:copyPluginPropertiesTemplate -:modules:lang-expression:pluginProperties UP-TO-DATE -:modules:lang-expression:bundlePlugin UP-TO-DATE -:modules:lang-groovy:compileJava UP-TO-DATE -:modules:lang-groovy:processResources UP-TO-DATE -:modules:lang-groovy:classes UP-TO-DATE -:modules:lang-groovy:jar UP-TO-DATE -:modules:lang-groovy:copyPluginPropertiesTemplate -:modules:lang-groovy:pluginProperties UP-TO-DATE -:modules:lang-groovy:bundlePlugin UP-TO-DATE -:modules:lang-mustache:compileJava UP-TO-DATE -:modules:lang-mustache:processResources UP-TO-DATE -:modules:lang-mustache:classes UP-TO-DATE -:modules:lang-mustache:jar UP-TO-DATE -:modules:lang-mustache:copyPluginPropertiesTemplate -:modules:lang-mustache:pluginProperties UP-TO-DATE -:modules:lang-mustache:bundlePlugin UP-TO-DATE -:modules:lang-painless:compileJava UP-TO-DATE -:modules:lang-painless:processResources UP-TO-DATE 
-:modules:lang-painless:classes UP-TO-DATE -:modules:lang-painless:jar UP-TO-DATE -:modules:lang-painless:copyPluginPropertiesTemplate -:modules:lang-painless:pluginProperties UP-TO-DATE -:modules:lang-painless:bundlePlugin UP-TO-DATE -:modules:percolator:compileJava UP-TO-DATE -:modules:percolator:processResources UP-TO-DATE -:modules:percolator:classes UP-TO-DATE -:modules:percolator:jar UP-TO-DATE -:modules:percolator:copyPluginPropertiesTemplate -:modules:percolator:pluginProperties UP-TO-DATE -:modules:percolator:bundlePlugin UP-TO-DATE -:modules:reindex:compileJava UP-TO-DATE -:modules:reindex:processResources UP-TO-DATE -:modules:reindex:classes UP-TO-DATE -:modules:reindex:jar UP-TO-DATE -:modules:reindex:copyPluginPropertiesTemplate -:modules:reindex:pluginProperties UP-TO-DATE -:modules:reindex:bundlePlugin UP-TO-DATE -:distribution:buildModules UP-TO-DATE -:distribution:zip:buildZip UP-TO-DATE -:docs:integTest#extract -:docs:integTest#configure -:docs:integTest#start -:docs:integTest#wait -:docs:integTest - [junit4] says ahoj! Master seed: E40438BBAF23DBE2 -==> Test Info: seed=E40438BBAF23DBE2; jvm=1; suite=1 -Suite: org.elasticsearch.smoketest.SmokeTestDocsIT -==> Test Summary: 1 suite (1 ignored), 0 tests - [junit4] JVM J0: 0.30 .. 1.64 = 1.35s - [junit4] Execution time total: 1.66 sec. 
- [junit4] Tests summary: 1 suite (1 ignored), 0 tests -:docs:integTest FAILED -:docs:integTest#stop - -BUILD FAILED - -Total time: 22.834 secs - -BUILD SUCCESSFUL -Total time: 1 second From 08f7f79b2e66706daa9ce6eb49674012c68097cb Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 3 Jun 2016 16:32:35 -0400 Subject: [PATCH 16/39] Wrap lines at 140 characters (:qa projects) --- .../src/main/resources/checkstyle_suppressions.xml | 8 -------- .../elasticsearch/bootstrap/ESPolicyUnitTests.java | 3 ++- .../elasticsearch/bootstrap/EvilSecurityTests.java | 3 ++- .../java/org/elasticsearch/tribe/TribeUnitTests.java | 5 ++--- .../ingest/IngestDocumentMustacheIT.java | 12 ++++++++---- .../ingest/IngestMustacheSetProcessorIT.java | 6 ++++-- 6 files changed, 18 insertions(+), 19 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index c77ef0f9d5b..0245cda2fe1 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -1273,14 +1273,6 @@ - - - - - - - - diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java index 220d093301c..255dfb3ec5c 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java @@ -64,6 +64,7 @@ public class ESPolicyUnitTests extends ESTestCase { assumeTrue("test cannot run with security manager", System.getSecurityManager() == null); PermissionCollection noPermissions = new Permissions(); ESPolicy policy = new ESPolicy(noPermissions, Collections.emptyMap(), true); - assertFalse(policy.implies(new ProtectionDomain(new CodeSource(null, (Certificate[])null), noPermissions), new FilePermission("foo", "read"))); + assertFalse(policy.implies(new ProtectionDomain(new 
CodeSource(null, (Certificate[]) null), noPermissions), + new FilePermission("foo", "read"))); } } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java index df54cf87fa4..6850f0026e6 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java @@ -77,7 +77,8 @@ public class EvilSecurityTests extends ESTestCase { settingsBuilder.put(Environment.PATH_HOME_SETTING.getKey(), esHome.resolve("home").toString()); settingsBuilder.put(Environment.PATH_CONF_SETTING.getKey(), esHome.resolve("conf").toString()); settingsBuilder.put(Environment.PATH_SCRIPTS_SETTING.getKey(), esHome.resolve("scripts").toString()); - settingsBuilder.putArray(Environment.PATH_DATA_SETTING.getKey(), esHome.resolve("data1").toString(), esHome.resolve("data2").toString()); + settingsBuilder.putArray(Environment.PATH_DATA_SETTING.getKey(), esHome.resolve("data1").toString(), + esHome.resolve("data2").toString()); settingsBuilder.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), esHome.resolve("custom").toString()); settingsBuilder.put(Environment.PATH_LOGS_SETTING.getKey(), esHome.resolve("logs").toString()); settingsBuilder.put(Environment.PIDFILE_SETTING.getKey(), esHome.resolve("test.pid").toString()); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index f9cdf5b4f66..4199a5d67cd 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.node.Node; -import 
org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalTestCluster; @@ -111,8 +110,8 @@ public class TribeUnitTests extends ESTestCase { assertThat(state.getClusterName().value(), equalTo("tribe_node_cluster")); assertThat(state.getNodes().getSize(), equalTo(5)); for (DiscoveryNode discoveryNode : state.getNodes()) { - assertThat(discoveryNode.getName(), either(equalTo("tribe1_node")).or(equalTo("tribe2_node")).or(equalTo("tribe_node")) - .or(equalTo("tribe_node/t1")).or(equalTo("tribe_node/t2"))); + assertThat(discoveryNode.getName(), either(equalTo("tribe1_node")).or(equalTo("tribe2_node")) + .or(equalTo("tribe_node")).or(equalTo("tribe_node/t1")).or(equalTo("tribe_node/t2"))); } } }); diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java index c8c50603625..2314e273fc6 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java @@ -52,10 +52,12 @@ public class IngestDocumentMustacheIT extends AbstractMustacheTestCase { innerObject.put("qux", Collections.singletonMap("fubar", "hello qux and fubar")); document.put("foo", innerObject); IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, document); - ingestDocument.setFieldValue(templateService.compile("field1"), ValueSource.wrap("1 {{foo.bar}} {{foo.baz}} {{foo.qux.fubar}}", templateService)); + ingestDocument.setFieldValue(templateService.compile("field1"), + ValueSource.wrap("1 {{foo.bar}} {{foo.baz}} {{foo.qux.fubar}}", templateService)); 
assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 hello bar hello baz hello qux and fubar")); - ingestDocument.setFieldValue(templateService.compile("field1"), ValueSource.wrap("2 {{_source.foo.bar}} {{_source.foo.baz}} {{_source.foo.qux.fubar}}", templateService)); + ingestDocument.setFieldValue(templateService.compile("field1"), + ValueSource.wrap("2 {{_source.foo.bar}} {{_source.foo.baz}} {{_source.foo.qux.fubar}}", templateService)); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("2 hello bar hello baz hello qux and fubar")); } @@ -79,7 +81,9 @@ public class IngestDocumentMustacheIT extends AbstractMustacheTestCase { ingestMap.put("timestamp", "bogus_timestamp"); document.put("_ingest", ingestMap); IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, document); - ingestDocument.setFieldValue(templateService.compile("ingest_timestamp"), ValueSource.wrap("{{_ingest.timestamp}} and {{_source._ingest.timestamp}}", templateService)); - assertThat(ingestDocument.getFieldValue("ingest_timestamp", String.class), equalTo(ingestDocument.getIngestMetadata().get("timestamp") + " and bogus_timestamp")); + ingestDocument.setFieldValue(templateService.compile("ingest_timestamp"), + ValueSource.wrap("{{_ingest.timestamp}} and {{_source._ingest.timestamp}}", templateService)); + assertThat(ingestDocument.getFieldValue("ingest_timestamp", String.class), + equalTo(ingestDocument.getIngestMetadata().get("timestamp") + " and bogus_timestamp")); } } diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestMustacheSetProcessorIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestMustacheSetProcessorIT.java index ed5ad65466b..f088f84d144 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestMustacheSetProcessorIT.java +++ 
b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestMustacheSetProcessorIT.java @@ -50,11 +50,13 @@ public class IngestMustacheSetProcessorIT extends AbstractMustacheTestCase { } public void testSetWithTemplates() throws Exception { - IngestDocument.MetaData randomMetaData = randomFrom(IngestDocument.MetaData.INDEX, IngestDocument.MetaData.TYPE, IngestDocument.MetaData.ID); + IngestDocument.MetaData randomMetaData = randomFrom(IngestDocument.MetaData.INDEX, IngestDocument.MetaData.TYPE, + IngestDocument.MetaData.ID); Processor processor = createSetProcessor("field{{_type}}", "_value {{" + randomMetaData.getFieldName() + "}}"); IngestDocument ingestDocument = createIngestDocument(new HashMap<>()); processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue("field_type", String.class), Matchers.equalTo("_value " + ingestDocument.getFieldValue(randomMetaData.getFieldName(), String.class))); + assertThat(ingestDocument.getFieldValue("field_type", String.class), + Matchers.equalTo("_value " + ingestDocument.getFieldValue(randomMetaData.getFieldName(), String.class))); } private SetProcessor createSetProcessor(String fieldName, Object fieldValue) throws Exception { From 4ca04d6f6cbc819da030ecdea3c3694192b7aa08 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 3 Jun 2016 16:15:04 +0200 Subject: [PATCH 17/39] Close SearchContext if query rewrite failed If a query failed to be rewritten and throws an exception, the SearchContext is not properly closed, skewing the ref count on the underlying Store. 
--- .../index/query/WrapperQueryBuilder.java | 2 +- .../action/search/RestMultiSearchAction.java | 2 +- .../elasticsearch/search/SearchService.java | 14 +-- .../search/SearchServiceTests.java | 100 ++++++++++++++++++ 4 files changed, 109 insertions(+), 9 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java index 4e1eb83272a..7e469d6bbe4 100644 --- a/core/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java @@ -126,7 +126,7 @@ public class WrapperQueryBuilder extends AbstractQueryBuilder imp indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout, fetchPhase); SearchContext.setCurrent(context); - request.rewrite(context.getQueryShardContext()); - // reset that we have used nowInMillis from the context since it may - // have been rewritten so its no longer in the query and the request can - // be cached. If it is still present in the request (e.g. in a range - // aggregation) it will still be caught when the aggregation is - // evaluated. - context.resetNowInMillisUsed(); try { + request.rewrite(context.getQueryShardContext()); + // reset that we have used nowInMillis from the context since it may + // have been rewritten so its no longer in the query and the request can + // be cached. If it is still present in the request (e.g. in a range + // aggregation) it will still be caught when the aggregation is + // evaluated. 
+ context.resetNowInMillisUsed(); if (request.scroll() != null) { context.scrollContext(new ScrollContext()); context.scrollContext().scroll = request.scroll(); diff --git a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java index a222ff52722..2ac00b541af 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -19,9 +19,25 @@ package org.elasticsearch.search; +import org.apache.lucene.search.Query; +import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import java.io.IOException; +import java.util.Collection; import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -35,6 +51,11 @@ public class SearchServiceTests extends ESSingleNodeTestCase { return true; } + @Override + protected Collection> getPlugins() { + return pluginList(FailOnRewriteQueryPlugin.class); + } + public void testClearOnClose() throws ExecutionException, InterruptedException { createIndex("index"); client().prepareIndex("index", "type", "1").setSource("field", "value").setRefresh(true).get(); 
@@ -70,4 +91,83 @@ public class SearchServiceTests extends ESSingleNodeTestCase { assertAcked(client().admin().indices().prepareDelete("index")); assertEquals(0, service.getActiveContexts()); } + + public void testCloseSearchContextOnRewriteException() { + createIndex("index"); + client().prepareIndex("index", "type", "1").setSource("field", "value").setRefresh(true).get(); + + SearchService service = getInstanceFromNode(SearchService.class); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + IndexShard indexShard = indexService.getShard(0); + + final int activeContexts = service.getActiveContexts(); + final int activeRefs = indexShard.store().refCount(); + expectThrows(SearchPhaseExecutionException.class, () -> + client().prepareSearch("index").setQuery(new FailOnRewriteQueryBuilder()).get()); + assertEquals(activeContexts, service.getActiveContexts()); + assertEquals(activeRefs, indexShard.store().refCount()); + } + + public static class FailOnRewriteQueryPlugin extends Plugin { + + @Override + public String name() { + return FailOnRewriteQueryPlugin.class.getSimpleName(); + } + + @Override + public String description() { + return "This plugin registers a query that always fails at rewrite phase"; + } + + public void onModule(SearchModule module) { + module.registerQuery(FailOnRewriteQueryBuilder::new, parseContext -> { + throw new UnsupportedOperationException("No query parser for this plugin"); + }, new ParseField("fail_on_rewrite_query")); + } + } + + public static class FailOnRewriteQueryBuilder extends AbstractQueryBuilder { + + public FailOnRewriteQueryBuilder(StreamInput in) throws IOException { + super(in); + } + + public FailOnRewriteQueryBuilder() { + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { + throw new IllegalStateException("Fail on rewrite phase"); + } + + @Override + 
protected void doWriteTo(StreamOutput out) throws IOException { + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + return null; + } + + @Override + protected boolean doEquals(FailOnRewriteQueryBuilder other) { + return false; + } + + @Override + protected int doHashCode() { + return 0; + } + + @Override + public String getWriteableName() { + return null; + } + } } From a1172d816cc3d8404347b8770c162b4f938fc486 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 27 May 2016 16:34:53 +0200 Subject: [PATCH 18/39] Implement ctx.op = "delete" on _update_by_query and _reindex closes #18043 --- docs/reference/docs/reindex.asciidoc | 20 +++ docs/reference/docs/update-by-query.asciidoc | 25 ++- .../AbstractAsyncBulkIndexByScrollAction.java | 145 ++++++++++++++- .../AbstractBaseReindexRestHandler.java | 3 +- .../index/reindex/BulkByScrollTask.java | 10 +- .../reindex/RestDeleteByQueryAction.java | 2 +- .../index/reindex/RestReindexAction.java | 2 +- .../reindex/RestUpdateByQueryAction.java | 2 +- ...BulkIndexByScrollActionScriptTestCase.java | 30 +++- .../rest-api-spec/test/reindex/10_script.yaml | 92 ++++++++++ .../test/update_by_query/10_script.yaml | 168 ++++++++++++++++++ 11 files changed, 468 insertions(+), 31 deletions(-) diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 0bfb44b2aaf..3311049c699 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -30,6 +30,7 @@ That will return something like this: "timed_out": false, "created": 120, "updated": 0, + "deleted": 0, "batches": 1, "version_conflicts": 0, "noops": 0, @@ -244,6 +245,24 @@ POST _reindex // CONSOLE // TEST[setup:twitter] +Just as in `_update_by_query`, you can set `ctx.op` to change the +operation that is executed on the destination index: + +`noop`:: + +Set `ctx.op = 
"noop"` if your script decides that the document doesn't have +to be indexed in the destination index. This no operation will be reported +in the `noop` counter in the <>. + +`delete`:: + +Set `ctx.op = "delete"` if your script decides that the document must be + deleted from the destination index. The deletion will be reported in the + `deleted` counter in the <>. + +Setting `ctx.op` to anything else is an error. Setting any +other field in `ctx` is an error. + Think of the possibilities! Just be careful! With great power.... You can change: @@ -377,6 +396,7 @@ starting the next set. This is "bursty" instead of "smooth". The default is `unlimited` which is also the only non-number value that it accepts. [float] +[[docs-reindex-response-body]] === Response body The JSON response looks like this: diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index ac4da4251be..f3a147e36c1 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -23,6 +23,7 @@ That will return something like this: "took" : 147, "timed_out": false, "updated": 120, + "deleted": 0, "batches": 1, "version_conflicts": 0, "noops": 0, @@ -115,11 +116,24 @@ POST twitter/_update_by_query // CONSOLE // TEST[setup:twitter] -Just as in <> you can set `ctx.op = "noop"` if -your script decides that it doesn't have to make any changes. That will cause -`_update_by_query` to omit that document from its updates. Setting `ctx.op` to -anything else is an error. If you want to delete by a query you can use the -<> instead. Setting any +Just as in <> you can set `ctx.op` to change the +operation that is executed: + + +`noop`:: + +Set `ctx.op = "noop"` if your script decides that it doesn't have to make any +changes. That will cause `_update_by_query` to omit that document from its updates. + This no operation will be reported in the `noop` counter in the +<>. 
+ +`delete`:: + +Set `ctx.op = "delete"` if your script decides that the document must be + deleted. The deletion will be reported in the `deleted` counter in the +<>. + +Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error. Note that we stopped specifying `conflicts=proceed`. In this case we want a @@ -212,6 +226,7 @@ starting the next set. This is "bursty" instead of "smooth". The default is `unlimited` which is also the only non-number value that it accepts. [float] +[[docs-update-by-query-response-body]] === Response body The JSON response looks like this: diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java index 0e3f3678851..6cb62207506 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java @@ -48,7 +48,9 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.threadpool.ThreadPool; +import java.util.Arrays; import java.util.HashMap; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.function.BiFunction; @@ -173,18 +175,30 @@ public abstract class AbstractAsyncBulkIndexByScrollAction resultCtx = (Map) executable.unwrap(context); String newOp = (String) resultCtx.remove("op"); if (newOp == null) { - throw new IllegalArgumentException("Script cleared op!"); - } - if ("noop".equals(newOp)) { - task.countNoop(); - return null; - } - if (false == "update".equals(newOp)) { - throw new IllegalArgumentException("Invalid op [" + newOp + ']'); + throw new IllegalArgumentException("Script cleared operation type"); } /* @@ -468,12 +538,35 @@ public abstract class 
AbstractAsyncBulkIndexByScrollAction scriptChangedOpType(RequestWrapper request, OpType oldOpType, OpType newOpType) { + switch (newOpType) { + case NOOP: + task.countNoop(); + return null; + case DELETE: + RequestWrapper delete = wrap(new DeleteRequest(request.getIndex(), request.getType(), request.getId())); + delete.setVersion(request.getVersion()); + delete.setVersionType(VersionType.INTERNAL); + delete.setParent(request.getParent()); + delete.setRouting(request.getRouting()); + return delete; + default: + throw new IllegalArgumentException("Unsupported operation type change from [" + oldOpType + "] to [" + newOpType + "]"); + } + } + protected abstract void scriptChangedIndex(RequestWrapper request, Object to); protected abstract void scriptChangedType(RequestWrapper request, Object to); @@ -489,5 +582,39 @@ public abstract class AbstractAsyncBulkIndexByScrollAction request, Object to); protected abstract void scriptChangedTTL(RequestWrapper request, Object to); + + } + + public enum OpType { + + NOOP("noop"), + INDEX("index"), + DELETE("delete"); + + private final String id; + + OpType(String id) { + this.id = id; + } + + public static OpType fromString(String opType) { + String lowerOpType = opType.toLowerCase(Locale.ROOT); + switch (lowerOpType) { + case "noop": + return OpType.NOOP; + case "index": + return OpType.INDEX; + case "delete": + return OpType.DELETE; + default: + throw new IllegalArgumentException("Operation type [" + lowerOpType + "] not allowed, only " + + Arrays.toString(values()) + " are allowed"); + } + } + + @Override + public String toString() { + return id.toLowerCase(Locale.ROOT); + } } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java index e78a6a9c350..3aea4dbce8a 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java 
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBaseReindexRestHandler.java @@ -64,7 +64,7 @@ public abstract class AbstractBaseReindexRestHandler< } protected void handleRequest(RestRequest request, RestChannel channel, - boolean includeCreated, boolean includeUpdated, boolean includeDeleted) throws IOException { + boolean includeCreated, boolean includeUpdated) throws IOException { // Build the internal request Request internal = setCommonOptions(request, buildRequest(request)); @@ -74,7 +74,6 @@ public abstract class AbstractBaseReindexRestHandler< Map params = new HashMap<>(); params.put(BulkByScrollTask.Status.INCLUDE_CREATED, Boolean.toString(includeCreated)); params.put(BulkByScrollTask.Status.INCLUDE_UPDATED, Boolean.toString(includeUpdated)); - params.put(BulkByScrollTask.Status.INCLUDE_DELETED, Boolean.toString(includeDeleted)); action.execute(internal, new BulkIndexByScrollResponseContentListener<>(channel, params)); return; diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java index 40ee492b4a9..5197cdae079 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java @@ -126,12 +126,6 @@ public class BulkByScrollTask extends CancellableTask { */ public static final String INCLUDE_UPDATED = "include_updated"; - /** - * XContent param name to indicate if "deleted" count must be included - * in the response. 
- */ - public static final String INCLUDE_DELETED = "include_deleted"; - private final long total; private final long updated; private final long created; @@ -213,9 +207,7 @@ public class BulkByScrollTask extends CancellableTask { if (params.paramAsBoolean(INCLUDE_CREATED, true)) { builder.field("created", created); } - if (params.paramAsBoolean(INCLUDE_DELETED, true)) { - builder.field("deleted", deleted); - } + builder.field("deleted", deleted); builder.field("batches", batches); builder.field("version_conflicts", versionConflicts); builder.field("noops", noops); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java index 4750fe22313..bb894584c8b 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestDeleteByQueryAction.java @@ -55,7 +55,7 @@ public class RestDeleteByQueryAction extends AbstractBulkByQueryRestHandler> scriptBody) { + @SuppressWarnings("unchecked") + protected > T applyScript(Consumer> scriptBody) { IndexRequest index = new IndexRequest("index", "type", "1").source(singletonMap("foo", "bar")); Map fields = new HashMap<>(); InternalSearchHit doc = new InternalSearchHit(0, "id", new Text("type"), fields); @@ -66,8 +71,8 @@ public abstract class AbstractAsyncBulkIndexByScrollActionScriptTestCase< when(scriptService.executable(any(CompiledScript.class), Matchers.>any())) .thenReturn(executableScript); AbstractAsyncBulkIndexByScrollAction action = action(scriptService, request().setScript(EMPTY_SCRIPT)); - action.buildScriptApplier().apply(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc); - return index; + RequestWrapper result = action.buildScriptApplier().apply(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc); + return (result != null) ? 
(T) result.self() : null; } public void testScriptAddingJunkToCtxIsError() { @@ -88,5 +93,24 @@ public abstract class AbstractAsyncBulkIndexByScrollActionScriptTestCase< assertEquals("cat", index.sourceAsMap().get("bar")); } + public void testSetOpTypeNoop() throws Exception { + assertThat(task.getStatus().getNoops(), equalTo(0L)); + assertNull(applyScript((Map ctx) -> ctx.put("op", OpType.NOOP.toString()))); + assertThat(task.getStatus().getNoops(), equalTo(1L)); + } + + public void testSetOpTypeDelete() throws Exception { + DeleteRequest delete = applyScript((Map ctx) -> ctx.put("op", OpType.DELETE.toString())); + assertThat(delete.index(), equalTo("index")); + assertThat(delete.type(), equalTo("type")); + assertThat(delete.id(), equalTo("1")); + } + + public void testSetOpTypeUnknown() throws Exception { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> applyScript((Map ctx) -> ctx.put("op", "unknown"))); + assertThat(e.getMessage(), equalTo("Operation type [unknown] not allowed, only [noop, index, delete] are allowed")); + } + protected abstract AbstractAsyncBulkIndexByScrollAction action(ScriptService scriptService, Request request); } diff --git a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/10_script.yaml b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/10_script.yaml index aa553a5c9dc..783838d7f8e 100644 --- a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/10_script.yaml +++ b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/10_script.yaml @@ -284,6 +284,11 @@ user: notfoo - match: { hits.total: 0 } + - do: + count: + index: new_twitter + - match: {count: 1} + --- "Noop all docs": - do: @@ -313,6 +318,11 @@ - match: {updated: 0} - match: {noops: 2} + - do: + indices.exists: + index: new_twitter + - is_false: '' + --- "Set version to null to force an update": - do: @@ -443,3 +453,85 @@ match: 
user: another - match: { hits.total: 1 } + +--- +"Reindex all docs with one doc deletion": + # Source index + - do: + index: + index: index1 + type: type1 + id: 1 + body: { "lang": "en", "id": 123 } + - do: + index: + index: index1 + type: type1 + id: 2 + body: { "lang": "en", "id": 456 } + - do: + index: + index: index1 + type: type1 + id: 3 + body: { "lang": "fr", "id": 789 } + # Destination index + - do: + index: + index: index2 + type: type2 + id: fr_789 + body: { "lang": "fr", "id": 789 } + - do: + index: + index: index2 + type: type2 + id: en_123 + body: { "lang": "en", "id": 123 } + - do: + indices.refresh: {} + + # Reindex all documents from "index1" into "index2", changing their type + # to "type2" and their id to the concatened lang+id fields, + # trashing all non-english pre existing ones + - do: + reindex: + refresh: true + body: + source: + index: index1 + dest: + index: index2 + type: type2 + script: + inline: "ctx._id = ctx._source.lang + '_' + ctx._source.id; + if (ctx._source.lang != \"en\" ) {ctx.op = 'delete'}" + - match: {created: 1} + - match: {noops: 0} + - match: {updated: 1} + - match: {deleted: 1} + + - do: + mget: + body: + docs: + - { _index: index2, _type: type2, _id: en_123} + - { _index: index2, _type: type2, _id: en_456} + - { _index: index2, _type: type2, _id: fr_789} + + - is_true: docs.0.found + - match: { docs.0._index: index2 } + - match: { docs.0._type: type2 } + - match: { docs.0._id: en_123 } + - match: { docs.0._version: 2 } + + - is_true: docs.1.found + - match: { docs.1._index: index2 } + - match: { docs.1._type: type2 } + - match: { docs.1._id: en_456 } + - match: { docs.1._version: 1 } + + - is_false: docs.2.found + - match: { docs.2._index: index2 } + - match: { docs.2._type: type2 } + - match: { docs.2._id: fr_789 } diff --git a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update_by_query/10_script.yaml 
b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update_by_query/10_script.yaml index 54a79ac1e32..e4fef86b1d1 100644 --- a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update_by_query/10_script.yaml +++ b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update_by_query/10_script.yaml @@ -138,3 +138,171 @@ body: script: inline: ctx._id = "stuff" + +--- +"Update all docs with one doc deletion": + - do: + index: + index: twitter + type: tweet + id: 1 + body: { "level": 9, "last_updated": "2016-01-01T12:10:30Z" } + - do: + index: + index: twitter + type: tweet + id: 2 + body: { "level": 10, "last_updated": "2016-01-01T12:10:30Z" } + - do: + index: + index: twitter + type: tweet + id: 3 + body: { "level": 11, "last_updated": "2016-01-01T12:10:30Z" } + - do: + index: + index: twitter + type: tweet + id: 4 + body: { "level": 12, "last_updated": "2016-01-01T12:10:30Z" } + - do: + indices.refresh: {} + + - do: + update_by_query: + refresh: true + index: twitter + body: + script: + inline: if (ctx._source.level != 11) {ctx._source.last_updated = "2016-01-02T00:00:00Z"} else {ctx.op = "delete"} + - match: {updated: 3} + - match: {deleted: 1} + - match: {noops: 0} + + - do: + search: + index: twitter + body: + query: + match: + last_updated: "2016-01-02T00:00:00Z" + - match: { hits.total: 3 } + + - do: + search: + index: twitter + body: + query: + term: + level: 11 + - match: { hits.total: 0 } + + - do: + count: + index: twitter + - match: {count: 3} + +--- +"Update all docs with one deletion and one noop": + - do: + index: + index: twitter + type: tweet + id: 1 + body: { "level": 9, "last_updated": "2016-01-01T12:10:30Z" } + - do: + index: + index: twitter + type: tweet + id: 2 + body: { "level": 10, "last_updated": "2016-01-01T12:10:30Z" } + - do: + index: + index: twitter + type: tweet + id: 3 + body: { "level": 11, "last_updated": "2016-01-01T12:10:30Z" } + - do: + index: + index: twitter + type: 
tweet + id: 4 + body: { "level": 12, "last_updated": "2016-01-01T12:10:30Z" } + - do: + indices.refresh: {} + + - do: + update_by_query: + refresh: true + index: twitter + body: + script: + inline: "switch (ctx._source.level % 3) { + case 0: + ctx._source.last_updated = \"2016-01-02T00:00:00Z\"; + break; + case 1: + ctx.op = \"noop\"; + break; + case 2: + ctx.op = \"delete\"; + break; + }" + - match: {updated: 2} + - match: {deleted: 1} + - match: {noops: 1} + + - do: + search: + index: twitter + body: + query: + match: + last_updated: "2016-01-02T00:00:00Z" + - match: { hits.total: 2 } + + - do: + search: + index: twitter + body: + query: + match: + last_updated: "2016-01-01T12:10:30Z" + - match: { hits.total: 1 } + + - do: + search: + index: twitter + body: + query: + term: + level: 11 + - match: { hits.total: 0 } + +--- +"Set unsupported operation type": + - do: + index: + index: twitter + type: tweet + id: 1 + body: { "user": "kimchy" } + - do: + index: + index: twitter + type: tweet + id: 2 + body: { "user": "foo" } + - do: + indices.refresh: {} + + - do: + catch: request + update_by_query: + refresh: true + index: twitter + body: + script: + inline: if (ctx._source.user == "kimchy") {ctx.op = "update"} else {ctx.op = "junk"} + + - match: { error.reason: 'Operation type [junk] not allowed, only [noop, index, delete] are allowed' } From 371c73e14023917f3892090d2d3d7ff9ac1216b9 Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Thu, 2 Jun 2016 16:30:50 -0500 Subject: [PATCH 19/39] refactor matrix agg documentation from modules to main agg section --- docs/reference/aggregations.asciidoc | 7 +++++++ .../matrix.asciidoc} | 4 ++-- .../matrix/stats-aggregation.asciidoc} | 2 +- docs/reference/modules.asciidoc | 6 ------ 4 files changed, 10 insertions(+), 9 deletions(-) rename docs/reference/{modules/aggregations-matrix.asciidoc => aggregations/matrix.asciidoc} (78%) rename docs/reference/{modules/aggregations/matrix/stats.asciidoc => 
aggregations/matrix/stats-aggregation.asciidoc} (98%) diff --git a/docs/reference/aggregations.asciidoc b/docs/reference/aggregations.asciidoc index f3f24e8704c..ada134238bb 100644 --- a/docs/reference/aggregations.asciidoc +++ b/docs/reference/aggregations.asciidoc @@ -23,6 +23,11 @@ it is often easier to break them into three main families: <>:: Aggregations that keep track and compute metrics over a set of documents. +<>:: + A family of aggregations that operate on multiple fields and produce a matrix result based on the + values extracted from the requested document fields. Unlike metric and bucket aggregations, this + aggregation family does not yet support scripting. + <>:: Aggregations that aggregate the output of other aggregations and their associated metrics @@ -100,4 +105,6 @@ include::aggregations/bucket.asciidoc[] include::aggregations/pipeline.asciidoc[] +include::aggregations/matrix.asciidoc[] + include::aggregations/misc.asciidoc[] diff --git a/docs/reference/modules/aggregations-matrix.asciidoc b/docs/reference/aggregations/matrix.asciidoc similarity index 78% rename from docs/reference/modules/aggregations-matrix.asciidoc rename to docs/reference/aggregations/matrix.asciidoc index e8f741e7df6..e0d3d489ce1 100644 --- a/docs/reference/modules/aggregations-matrix.asciidoc +++ b/docs/reference/aggregations/matrix.asciidoc @@ -1,4 +1,4 @@ -[[modules-aggregations-matrix]] +[[search-aggregations-matrix]] == Matrix Aggregations experimental[] @@ -6,4 +6,4 @@ experimental[] The aggregations in this family operate on multiple fields and produce a matrix result based on the values extracted from the requested document fields. Unlike metric and bucket aggregations, this aggregation family does not yet support scripting. 
-include::aggregations/matrix/stats.asciidoc[] +include::matrix/stats-aggregation.asciidoc[] \ No newline at end of file diff --git a/docs/reference/modules/aggregations/matrix/stats.asciidoc b/docs/reference/aggregations/matrix/stats-aggregation.asciidoc similarity index 98% rename from docs/reference/modules/aggregations/matrix/stats.asciidoc rename to docs/reference/aggregations/matrix/stats-aggregation.asciidoc index 2649ecd5fe7..8dafb252f08 100644 --- a/docs/reference/modules/aggregations/matrix/stats.asciidoc +++ b/docs/reference/aggregations/matrix/stats-aggregation.asciidoc @@ -1,4 +1,4 @@ -[[modules-matrix-aggregations-stats]] +[[search-aggregations-matrix-stats-aggregation]] === Matrix Stats The `matrix_stats` aggregation is a numeric aggregation that computes the following statistics over a set of document fields: diff --git a/docs/reference/modules.asciidoc b/docs/reference/modules.asciidoc index 01b0edd0676..5a39cdfd790 100644 --- a/docs/reference/modules.asciidoc +++ b/docs/reference/modules.asciidoc @@ -18,10 +18,6 @@ These settings can be dynamically updated on a live cluster with the The modules in this section are: -<>:: - - A family of aggregations that operate on multiple document fields and produce a matrix as output. - <>:: Settings to control where, when, and how shards are allocated to nodes. @@ -84,8 +80,6 @@ The modules in this section are: -- -include::modules/aggregations-matrix.asciidoc[] - include::modules/cluster.asciidoc[] include::modules/discovery.asciidoc[] From 0a8afa2e7269ddbec3755b8f10033e93f55149c2 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Mon, 6 Jun 2016 15:14:09 +0200 Subject: [PATCH 20/39] Add back pending deletes (#18698) Triggering the pending deletes logic was accidentally removed in the clean up PR #18602. 
--- .../elasticsearch/indices/IndicesService.java | 17 ++++++++ .../cluster/IndicesClusterStateService.java | 40 ++++++++++++++++++- .../index/IndexWithShadowReplicasIT.java | 13 +++--- .../index/shard/IndexShardTests.java | 8 ++-- .../indices/IndicesServiceTests.java | 4 ++ .../org/elasticsearch/test/ESTestCase.java | 11 ++--- 6 files changed, 76 insertions(+), 17 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 188e3608cb3..ba512379868 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -112,6 +112,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -141,6 +142,7 @@ public class IndicesService extends AbstractLifecycleComponent i private final CircuitBreakerService circuitBreakerService; private volatile Map indices = emptyMap(); private final Map> pendingDeletes = new HashMap<>(); + private final AtomicInteger numUncompletedDeletes = new AtomicInteger(); private final OldShardsStats oldShardsStats = new OldShardsStats(); private final IndexStoreConfig indexStoreConfig; private final MapperRegistry mapperRegistry; @@ -782,6 +784,7 @@ public class IndicesService extends AbstractLifecycleComponent i pendingDeletes.put(index, list); } list.add(pendingDelete); + numUncompletedDeletes.incrementAndGet(); } } @@ -840,6 +843,7 @@ public class IndicesService extends AbstractLifecycleComponent i logger.debug("{} processing pending deletes", index); final long startTimeNS = System.nanoTime(); final List shardLocks = nodeEnv.lockAllForIndex(index, indexSettings, timeout.millis()); + int numRemoved = 0; try { Map 
locks = new HashMap<>(); for (ShardLock lock : shardLocks) { @@ -850,6 +854,7 @@ public class IndicesService extends AbstractLifecycleComponent i remove = pendingDeletes.remove(index); } if (remove != null && remove.isEmpty() == false) { + numRemoved = remove.size(); CollectionUtil.timSort(remove); // make sure we delete indices first final long maxSleepTimeMs = 10 * 1000; // ensure we retry after 10 sec long sleepTime = 10; @@ -896,6 +901,10 @@ public class IndicesService extends AbstractLifecycleComponent i } } finally { IOUtils.close(shardLocks); + if (numRemoved > 0) { + int remainingUncompletedDeletes = numUncompletedDeletes.addAndGet(-numRemoved); + assert remainingUncompletedDeletes >= 0; + } } } @@ -909,6 +918,14 @@ public class IndicesService extends AbstractLifecycleComponent i } } + /** + * Checks if all pending deletes have completed. Used by tests to ensure we don't check directory contents while deletion still ongoing. + * The reason is that, on Windows, browsing the directory contents can interfere with the deletion process and delay it unnecessarily. 
+ */ + public boolean hasUncompletedPendingDeletes() { + return numUncompletedDeletes.get() > 0; + } + /** * Returns this nodes {@link IndicesQueriesRegistry} */ diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 719fb812c74..bbb1a063e8f 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -20,6 +20,7 @@ package org.elasticsearch.indices.cluster; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -41,11 +42,14 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.Callback; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexShardAlreadyExistsException; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; @@ -67,7 +71,6 @@ import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.Iterator; @@ -75,6 +78,8 @@ import java.util.List; import 
java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; /** * @@ -213,11 +218,14 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent deleting index " + IDX); assertAcked(client().admin().indices().prepareDelete(IDX)); - - // assertBusy(() -> assertPathHasBeenCleared(dataPath), 1, TimeUnit.MINUTES); + assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class)); + assertPathHasBeenCleared(dataPath); //norelease //TODO: uncomment the test below when https://github.com/elastic/elasticsearch/issues/17695 is resolved. //assertIndicesDirsDeleted(nodes); @@ -647,8 +646,8 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { assertHitCount(resp, docCount); assertAcked(client().admin().indices().prepareDelete(IDX)); - - // assertBusy(() -> assertPathHasBeenCleared(dataPath), 1, TimeUnit.MINUTES); + assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class)); + assertPathHasBeenCleared(dataPath); //norelease //TODO: uncomment the test below when https://github.com/elastic/elasticsearch/issues/17695 is resolved. 
//assertIndicesDirsDeleted(nodes); @@ -839,8 +838,8 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { logger.info("--> deleting closed index"); client().admin().indices().prepareDelete(IDX).get(); - - assertBusy(() -> assertPathHasBeenCleared(dataPath), 1, TimeUnit.MINUTES); + assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class)); + assertPathHasBeenCleared(dataPath); assertIndicesDirsDeleted(nodes); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 7774537c734..e7a7eb46377 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -558,7 +558,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { SearchResponse response = client().prepareSearch("test").get(); assertHitCount(response, 1L); client().admin().indices().prepareDelete("test").get(); - assertBusyPathHasBeenCleared(idxPath); + assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class))); + assertPathHasBeenCleared(idxPath); } public void testExpectedShardSizeIsPresent() throws InterruptedException { @@ -641,8 +642,9 @@ public class IndexShardTests extends ESSingleNodeTestCase { assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L)); assertAcked(client().admin().indices().prepareDelete(INDEX)); - assertBusyPathHasBeenCleared(startDir.toAbsolutePath()); - assertBusyPathHasBeenCleared(endDir.toAbsolutePath()); + assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class))); + assertPathHasBeenCleared(startDir.toAbsolutePath()); + assertPathHasBeenCleared(endDir.toAbsolutePath()); } public void testShardStats() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java 
b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 76f7a30e078..fa4183f689a 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -190,10 +190,12 @@ public class IndicesServiceTests extends ESSingleNodeTestCase { assertTrue(path.exists()); assertEquals(indicesService.numPendingDeletes(test.index()), numPending); + assertTrue(indicesService.hasUncompletedPendingDeletes()); // shard lock released... we can now delete indicesService.processPendingDeletes(test.index(), test.getIndexSettings(), new TimeValue(0, TimeUnit.MILLISECONDS)); assertEquals(indicesService.numPendingDeletes(test.index()), 0); + assertFalse(indicesService.hasUncompletedPendingDeletes()); assertFalse(path.exists()); if (randomBoolean()) { @@ -201,9 +203,11 @@ public class IndicesServiceTests extends ESSingleNodeTestCase { indicesService.addPendingDelete(new ShardId(test.index(), 1), test.getIndexSettings()); indicesService.addPendingDelete(new ShardId("bogus", "_na_", 1), test.getIndexSettings()); assertEquals(indicesService.numPendingDeletes(test.index()), 2); + assertTrue(indicesService.hasUncompletedPendingDeletes()); // shard lock released... 
we can now delete indicesService.processPendingDeletes(test.index(), test.getIndexSettings(), new TimeValue(0, TimeUnit.MILLISECONDS)); assertEquals(indicesService.numPendingDeletes(test.index()), 0); + assertFalse(indicesService.hasUncompletedPendingDeletes()); } assertAcked(client().admin().indices().prepareOpen("test")); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 972c1a50fbc..0859a8e86ea 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -56,6 +56,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.analysis.AnalysisService; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.test.junit.listeners.LoggingListener; @@ -672,11 +673,11 @@ public abstract class ESTestCase extends LuceneTestCase { return enabled; } - /** - * Asserts busily that there are no files in the specified path - */ - public void assertBusyPathHasBeenCleared(Path path) throws Exception { - assertBusy(() -> assertPathHasBeenCleared(path)); + public void assertAllIndicesRemovedAndDeletionCompleted(Iterable indicesServices) throws Exception { + for (IndicesService indicesService : indicesServices) { + assertBusy(() -> assertFalse(indicesService.iterator().hasNext()), 1, TimeUnit.MINUTES); + assertBusy(() -> assertFalse(indicesService.hasUncompletedPendingDeletes()), 1, TimeUnit.MINUTES); + } } /** From e3e8f101032545bd86244d609dbac4975f45884e Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Mon, 6 Jun 2016 15:45:57 +0200 Subject: [PATCH 21/39] [TEST] fix assertion that index was fully deleted --- 
.../java/org/elasticsearch/indices/IndicesServiceTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index fa4183f689a..5761997ed46 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -207,7 +207,7 @@ public class IndicesServiceTests extends ESSingleNodeTestCase { // shard lock released... we can now delete indicesService.processPendingDeletes(test.index(), test.getIndexSettings(), new TimeValue(0, TimeUnit.MILLISECONDS)); assertEquals(indicesService.numPendingDeletes(test.index()), 0); - assertFalse(indicesService.hasUncompletedPendingDeletes()); + assertTrue(indicesService.hasUncompletedPendingDeletes()); // "bogus" index has not been removed } assertAcked(client().admin().indices().prepareOpen("test")); From 200d76e6f07fbaa2d0957b79fa8ddc279671e182 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 6 Jun 2016 11:27:26 -0400 Subject: [PATCH 22/39] Throw exception if using a closed transport client Today when attempting to use a closed transport client, an exception saying that no nodes are available is thrown. This is because when a transport client is closed, its internal list of nodes is cleared. But this exception is puzzling and can be made clearer. This commit changes the behavior so that attempting to execute a request using a closed transport client throws an illegal state exception. 
Relates #18722 --- .../TransportClientNodesService.java | 13 +++++- .../transport/TransportClientTests.java | 41 +++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 core/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index c3379c9ceaf..71c5895669c 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -211,7 +211,18 @@ public class TransportClientNodesService extends AbstractComponent { } public void execute(NodeListenerCallback callback, ActionListener listener) { - List nodes = this.nodes; + // we first read nodes before checking the closed state; this + // is because otherwise we could be subject to a race where we + // read the state as not being closed, and then the client is + // closed and the nodes list is cleared, and then a + // NoNodeAvailableException is thrown + // it is important that the order of first setting the state of + // closed and then clearing the list of nodes is maintained in + // the close method + final List nodes = this.nodes; + if (closed) { + throw new IllegalStateException("transport client is closed"); + } ensureNodesAreAvailable(nodes); int index = getNodeNumber(); RetryListener retryListener = new RetryListener<>(callback, listener, nodes, index); diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java new file mode 100644 index 00000000000..ec2065b67e2 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more 
contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.transport; + +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.object.HasToString.hasToString; + +public class TransportClientTests extends ESTestCase { + + public void testThatUsingAClosedClientThrowsAnException() throws ExecutionException, InterruptedException { + final TransportClient client = TransportClient.builder().settings(Settings.EMPTY).build(); + client.close(); + final IllegalStateException e = + expectThrows(IllegalStateException.class, () -> client.admin().cluster().health(new ClusterHealthRequest()).get()); + assertThat(e, hasToString(containsString("transport client is closed"))); + } + +} From d8056c8213ea7d2fbd26dbe15e23e96acfbd180c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 6 Jun 2016 10:45:44 -0400 Subject: [PATCH 23/39] Add support for waiting until a refresh occurs This adds support for setting the refresh request parameter to `wait_for` in the `index`, `delete`, `update`, and `bulk` APIs. 
When `refresh=wait_for` is set those APIs will not return until their results have been made visible to search by a refresh. Also it adds a `forced_refresh` field to the response of `index`, `delete`, `update`, and to each item in a bulk response. This will be true for requests with `?refresh` or `?refresh=true` and will be true for some requests (see below) with `refresh=wait_for` but ought to otherwise always be false. `refresh=wait_for` is implemented as a list of `Tuple>`s in the new `RefreshListeners` class that is managed by `IndexShard`. The dynamic, index scoped `index.max_refresh_listeners` setting controls a maximum number of listeners allowed in any shard. If more than that many listeners accumulate in the engine then a refresh will be forced, the thread that adds the listener will be blocked until the refresh completes, and then the listener will be called with a `forcedRefresh` flag so it knows that it was the "straw that broke the camel's back". These listeners are only used by `refresh=wait_for` and that flag manifests itself as `forced_refresh` being `true` in the response. About half of this change comes from piping async-ness down to the appropriate layer in a way that is compatible with the ongoing work with sequence ids. Closes #1063 You can look up the winding story of all the commits here: https://github.com/elastic/elasticsearch/pull/17986 Here are the commit messages in case they are interesting to you: commit 59a753b89109828d2b8f0de05cb104fc663cf95e Author: Nik Everett Date: Mon Jun 6 10:18:23 2016 -0400 Replace a method reference with implementing an interface Saves a single allocation and forces more commonality between the WriteResults. commit 31f7861a85b457fb7378a6f27fa0a0c171538f68 Author: Nik Everett Date: Mon Jun 6 10:07:55 2016 -0400 Revert "Replace static method that takes consumer with delegate class that takes an interface" This reverts commit 777e23a6592c75db0081a53458cc760f4db69507. 
commit 777e23a6592c75db0081a53458cc760f4db69507 Author: Nik Everett Date: Mon Jun 6 09:29:35 2016 -0400 Replace static method that takes consumer with delegate class that takes an interface Same number of allocations, much less code duplication. commit 9b49a480ca9587a0a16ebe941662849f38289644 Author: Nik Everett Date: Mon Jun 6 08:25:38 2016 -0400 Patch from boaz commit c2bc36524fda119fd0514415127e8901d94409c8 Author: Nik Everett Date: Thu Jun 2 14:46:27 2016 -0400 Fix docs After updating to master we are actually testing them. commit 03975ac056e44954eb0a371149d410dcf303e212 Author: Nik Everett Date: Thu Jun 2 14:20:11 2016 -0400 Cleanup after merge from master commit 9c9a1deb002c5bebb2a997c89fa12b3d7978e02e Author: Nik Everett Date: Thu Jun 2 14:09:14 2016 -0400 Breaking changes notes commit 1c3e64ae06c07a85f7af80534fab88279adb30b4 Merge: 9e63ad6 f67e580 Author: Nik Everett Date: Thu Jun 2 14:00:05 2016 -0400 Merge branch 'master' into block_until_refresh2 commit 9e63ad6de52d0b28f0b6d7203721baf1ebf6f56b Author: Nik Everett Date: Thu Jun 2 13:21:27 2016 -0400 Test for TransportWriteAction commit 522ecb59d39b3c9e8df0d3b8df34b9e7aeaf0ce9 Author: Nik Everett Date: Thu Jun 2 10:30:18 2016 -0400 Document deprecation commit 0cd67b947f58867e704a1f0e66928a6fb5a11f11 Author: Nik Everett Date: Thu Jun 2 10:26:23 2016 -0400 Deprecate setRefresh(boolean) Users should use `setRefresh(RefreshPolicy)` instead. commit aeb1be3f2c501990b33fb1f8230d496035f498ef Author: Nik Everett Date: Thu Jun 2 10:12:27 2016 -0400 Remove checkstyle suppression It is fixed commit 00d09a9caa638b6f90f4896b5502dd98d8fad56e Author: Nik Everett Date: Thu Jun 2 10:08:28 2016 -0400 Improve comment commit 788164b898a6ee2878a273961230122b7386c3c9 Author: Nik Everett Date: Thu Jun 2 10:01:01 2016 -0400 S/ReplicatedWriteResponse/WriteResponse/ Now it lines up with WriteRequest. 
commit b74cf3fe778352b140355afcaa08d3d4412d749d Author: Nik Everett Date: Wed Jun 1 18:27:52 2016 -0400 Preserve `?refresh` behavior `?refresh` means the same things as `?refresh=true`. commit 30f972bdaeaaa0de6fe67746cdb8628aa86f5a8c Author: Nik Everett Date: Wed Jun 1 17:39:05 2016 -0400 Handle hanging documents If a document is added to the index during a refresh we weren't properly firing its refresh listener. This happened because the way we detect whether a refresh makes something visible or not is imperfect. It is ok because it always errs on the side of thinking that something isn't yet visible. So when a document arrives during a refresh the refresh listeners won't think it made it into a refresh when, often, it does. The way we work around this is by telling Elasticsearch that it ought to trigger a refresh if there are any pending refresh listeners even if there aren't pending documents to update. Lucene short circuits the refresh so it doesn't take that much effort, but the refresh listeners still get the signal that a refresh has come in and they still pick up the change and notify the listener. This means that the time that a listener can wait is actually slightly longer than the refresh interval. commit d523b5702b60c7ba309fb0dcf3cd3a4798f11960 Author: Nik Everett Date: Wed Jun 1 14:34:01 2016 -0400 Explain Integer.MAX_VALUE commit 4ffb7c0e954343cc1c04b3d7be2ebad66d3a016b Author: Nik Everett Date: Wed Jun 1 14:27:39 2016 -0400 Fire all refresh listeners in a single thread Rather than queueing a runnable each. commit 19606ec3bbe612095df45eba734c5b7eb2709c01 Author: Nik Everett Date: Wed Jun 1 14:09:52 2016 -0400 Assert translog ordering commit 6bb4e5c75e850f4a42518f06fbc955f7ec76d245 Author: Nik Everett Date: Wed Jun 1 13:17:44 2016 -0400 Support null RefreshListeners in InternalEngine Just skip using it. 
commit 74be1480d6e44af2b354ff9ea47c234d4870b6c2 Author: Nik Everett Date: Tue May 31 18:02:03 2016 -0400 Move funny ShardInfo hack for bulk into bulk This should make it easier to understand because it is closer to where it matters.... commit 2b771f8dabd488e056cfdc9989608d18264ddfb0 Author: Nik Everett Date: Tue May 31 17:39:46 2016 -0400 Pull listener out into an inner class with javadoc and stuff commit 058481ad72019c0492b03a7a4ac32a48673697d3 Author: Nik Everett Date: Tue May 31 17:33:42 2016 -0400 Fix javadoc links commit d2123b1cabf29bce8ff561d4a4c1c1d5b42bccad Author: Nik Everett Date: Tue May 31 17:28:09 2016 -0400 Make more stuff final commit 8453fc4f7850f6a02fb5971c17a942a3e3fd9f7b Author: Nik Everett Date: Tue May 31 17:26:48 2016 -0400 Javadoc commit fb16d2fc7016c1e8e1621d481e8781c7ef43326c Author: Nik Everett Date: Tue May 31 16:14:48 2016 -0400 Rewrite refresh docs commit 5797d1b1c4d233c0db918c0d08c21731ddccd05e Author: Nik Everett Date: Tue May 31 15:02:34 2016 -0400 Fix forced_refresh flag It wasn't being set. commit 43ce50a1de250a9e073a2ca6cbf55c1b4c74b11b Author: Nik Everett Date: Tue May 31 14:02:56 2016 -0400 Delay translog sync and flush until after refresh The sync might have occurred for us during the refresh so we have less work to do. Maybe. commit bb2739202e084703baf02cfa58f09517598cf14e Author: Nik Everett Date: Tue May 31 13:08:08 2016 -0400 Remove duplication in WritePrimaryResult and WriteReplicaResult commit 2f579f89b4867a880396f2e7fcffc508449ff2de Author: Nik Everett Date: Tue May 31 12:19:05 2016 -0400 Clean up registration of RefreshListeners commit 87ab6e60ca5ba945bf0fba84784b2bbe53506abf Author: Nik Everett Date: Tue May 31 11:28:30 2016 -0400 Shorten lock time in RefreshListeners Also use null to represent no listeners rather than an empty list. This saves allocating a new ArrayList every refresh cycle on every index. 
commit 0d49d9c5720dadfb67da3fa760397bf6d874601c Author: Nik Everett Date: Tue May 24 10:46:18 2016 -0400 Flip relationship between RefreshListeners and Engine Now RefreshListeners comes to Engine from EngineConfig. commit b2704b8a39382953f8f91a9743e894ee289f7514 Author: Nik Everett Date: Tue May 24 09:37:58 2016 -0400 Remove unused imports Maybe I added them? commit 04343a22647f19304d9dc716b3fac9b183227f63 Author: Nik Everett Date: Tue May 24 09:37:52 2016 -0400 Javadoc commit da1e765678890a02d61d8a29aa433274beb5e00c Author: Nik Everett Date: Tue May 24 09:26:35 2016 -0400 Reply with non-null Also move the fsync and flush to before the refresh listener stuff. commit 5d8eecd0d904b497844b4c81c46477bd6178ed3a Author: Nik Everett Date: Tue May 24 08:58:47 2016 -0400 Remove funky synchronization in AsyncReplicaAction commit 1ec71eea0f4e1228ae1497d982307be818ef4b65 Author: Nik Everett Date: Tue May 24 08:01:14 2016 -0400 s/LinkedTransferQueue/ArrayList/ commit 7da36a4ceed2ccf7955138c3b005237fa41efcb4 Author: Nik Everett Date: Tue May 24 07:46:38 2016 -0400 More cleanup for RefreshListeners commit 957e9b77007c32ee75dde152c6622bab065d5993 Author: Nik Everett Date: Tue May 24 07:34:13 2016 -0400 /Consumer/Executor/ commit 4d8bf5d4a70dcc56150c8d8d14165cd23d308b3c Author: Nik Everett Date: Mon May 23 22:20:42 2016 -0400 explain commit 15d948a348089bb2937eec5ac4e96f3ec67dbe32 Author: Nik Everett Date: Mon May 23 22:17:59 2016 -0400 Better.... 
commit dc28951d02973fc03b4d51913b5f96de14b75607 Author: Nik Everett Date: Mon May 23 21:09:20 2016 -0400 Javadocs and compromises commit 8eebaa89c0a1ee74982fbe0d56d1485ca2ae09db Author: Nik Everett Date: Mon May 23 20:52:49 2016 -0400 Take boaz's changes to their logic conclusion and unbreak important stuff like bulk commit 7056b96ea412f275005b93e3570bcff895859ed5 Author: Nik Everett Date: Mon May 23 15:49:32 2016 -0400 Patch from boaz commit 87be7eaed09a274cc6a99d1a3da81d2d7bf9dd64 Author: Nik Everett Date: Mon May 23 15:49:13 2016 -0400 Revert "Move async parts of replica operation outside of the lock" This reverts commit 13807ad10b6f5ecd39f98c9f20874f9f352c5bc2. commit 13807ad10b6f5ecd39f98c9f20874f9f352c5bc2 Author: Nik Everett Date: Fri May 20 22:53:15 2016 -0400 Move async parts of replica operation outside of the lock commit b8cadcef565908b276484f7f5f988fd58b38d8b6 Author: Nik Everett Date: Fri May 20 16:17:20 2016 -0400 Docs commit 91149e0580233bf79c2273b419fe9374ca746648 Author: Nik Everett Date: Fri May 20 15:17:40 2016 -0400 Finally! commit 1ff50c2faf56665d221f00a18d9ac88745904bf5 Author: Nik Everett Date: Fri May 20 15:01:53 2016 -0400 Remove Translog#lastWriteLocation I wasn't being careful enough with locks so it wasn't right anyway. Instead this builds a synthetic Tranlog.Location when you call getWriteLocation with much more relaxed equality guarantees. Rather than being equal to the last Translog.Location returned it is simply guaranteed to be greater than the last translog returned and less than the next. commit 55596ea68b5484490c3637fbad0d95564236478b Author: Nik Everett Date: Fri May 20 14:40:06 2016 -0400 Remove listener from shardOperationOnPrimary Create instead asyncShardOperationOnPrimary which is called after all of the replica operations are started to handle any async operations. 
commit 3322e26211bf681b37132274ee158ae330afc28b Author: Nik Everett Date: Tue May 17 17:20:02 2016 -0400 Increase default maximum number of listeners to 1000 commit 88171a8322a424e624d48960fb4c98dd43e4d671 Author: Nik Everett Date: Tue May 17 16:40:57 2016 -0400 Rename test commit 179c27c4f829f2c6ded65967652cf85adaf2ae52 Author: Nik Everett Date: Tue May 17 16:35:27 2016 -0400 Move refresh listeners into their own class They still live at the IndexShard level but they live on their own in RefreshListeners which interacts with IndexShard using a couple of callbacks and a registration method. This lets us test the listeners without standing up an entire IndexShard. We still test the listeners against an InternalEngine, because the interplay between InternalEngine, Translog, and RefreshListeners is complex and important to get right. commit d8926d5fc1d24b4da8ccff7e0f0907b98c583c41 Author: Nik Everett Date: Tue May 17 11:02:38 2016 -0400 Move refresh listeners into IndexShard commit df91cde398eb720143a85a8c6fa19bdc3a74e07d Author: Nik Everett Date: Mon May 16 16:01:03 2016 -0400 unused import commit 066da45b08148b266e4173166662fc1b3f66ed53 Author: Nik Everett Date: Mon May 16 15:54:11 2016 -0400 Remove RefreshListener interface Just pass a Translog.Location and a Consumer when registering. commit b971d6d3301c7522b2e7eb90d5d8dd96a77fa625 Author: Nik Everett Date: Mon May 16 14:41:06 2016 -0400 Docs for setForcedRefresh commit 6c43be821eaf61141d3ec520f988aad3a96a3941 Author: Nik Everett Date: Mon May 16 14:34:39 2016 -0400 Rename refresh setter and getter commit e61b7391f91263a4c4d6107bfbc2a828bbcc805c Author: Nik Everett Date: Mon Apr 25 22:48:09 2016 -0400 Trigger listeners even when there is no refresh Each refresh gives us an opportunity to pick up any listeners we may have left behind. 
commit 0c9b0477085c021f503db775640d25668e02f635 Author: Nik Everett Date: Mon Apr 25 20:30:06 2016 -0400 REST commit 8250343240de7e63118c663a230a7a314807a754 Author: Nik Everett Date: Mon Apr 25 19:34:22 2016 -0400 Switch to estimated count We don't need a linear time count of the number of listeners - a volatile variable is good enough to guess. It probably undercounts more than it overcounts but it isn't a huge problem. commit bd531167fe54f1bde6f6d4ddb0a8de5a7bcc18a2 Author: Nik Everett Date: Mon Apr 25 18:21:02 2016 -0400 Don't try and set forced refresh on bulk items without a response NullPointerExceptions are bad. If the entire request fails then the user has worse problems then "did these force a refresh". commit bcfded11515af5e0b3c3e36f3c2f73f5cd26512e Author: Nik Everett Date: Mon Apr 25 18:14:20 2016 -0400 Replace LinkedList and synchronized with LinkedTransferQueue commit 8a80cc70a76375a7593745884cb987535b37ca80 Author: Nik Everett Date: Mon Apr 25 17:38:24 2016 -0400 Support for update commit 1f36966742f851b7328015151ef6fc8f95299af2 Author: Nik Everett Date: Mon Apr 25 15:46:06 2016 -0400 Cleanup translog tests commit 8d121bf35eb265b8a0aee9710afeb1b054a113d4 Author: Nik Everett Date: Mon Apr 25 15:40:53 2016 -0400 Cleanup listener implementation Much more testing too! 
commit 2058f4a808762c4588309f21b13b677245832f2c Author: Nik Everett Date: Mon Apr 25 11:45:55 2016 -0400 Pass back information about whether we refreshed commit e445cb0cb91ebdbcfdbf566696edb2bf1c84a882 Author: Nik Everett Date: Mon Apr 25 11:03:31 2016 -0400 Javadoc commit 611cbeeaeb458f4b428bfc43a1ee6652adf4baff Author: Nik Everett Date: Mon Apr 25 11:01:40 2016 -0400 Move ReplicationResponse now it is in the same package as its request commit 9919758b644fd73895fb88cd6a4909a8387eb2e2 Author: Nik Everett Date: Mon Apr 25 11:00:14 2016 -0400 Oh boy that wasn't working commit 247cb483c4459dea8e95e0e3bd2e4bf8d452c598 Author: Nik Everett Date: Mon Apr 25 10:29:37 2016 -0400 Basic block_until_refresh exposed to java client and basic "is it plugged in" style tests. commit 46c855c9971cb2b748206d2afa6a2d88724be3ba Author: Nik Everett Date: Mon Apr 25 10:11:10 2016 -0400 Move test to own class commit a5ffd892d0a352ae7e9757f2640fc2a1fa656bf2 Author: Nik Everett Date: Mon Apr 25 07:44:25 2016 -0400 WIP commit 213bebb6ece11b85d17e44af9a54fc2e5e332d39 Author: Nik Everett Date: Fri Apr 22 21:35:52 2016 -0400 Add refresh listeners commit a2bc7f30e6d4857a1224ef5a89909b36c8f33731 Author: Nik Everett Date: Fri Apr 22 21:11:55 2016 -0400 Return last written location from refresh commit 85033a87551da89f36a23d4dfd5016db218e08ee Author: Nik Everett Date: Fri Apr 22 20:28:21 2016 -0400 Never reply to replica actions while you have the operation lock This last thing was causing periodic test failures because we were replying while we had the operation lock. Now, we probably could get away with that in most cases but the tests don't like it and it isn't a good idea to do network io while you have a lock anyway. So this prevents it. 
commit 1f25cf35e796835b3827b8a4110e09e5de61784c Author: Nik Everett Date: Fri Apr 22 19:56:18 2016 -0400 Cleanup commit 52c5f7c3f04710901f503334239a611c0e21c85a Author: Nik Everett Date: Fri Apr 22 19:33:00 2016 -0400 Add a listener to shard operations commit 5b142dc331214c8eef90587144f4b3f959f9eced Author: Nik Everett Date: Fri Apr 22 18:03:52 2016 -0400 Cleanup commit 3d22b2d7ceb473db339259452a7c4f117ce86069 Author: Nik Everett Date: Fri Apr 22 17:59:55 2016 -0400 Push the listener into shardOperationOnPrimary commit 34b378943b8185451acf6350f661c0ad33b5836d Author: Nik Everett Date: Fri Apr 22 17:48:47 2016 -0400 Doc commit b42b8da968d42cc7414020c7b199606a5dcce50a Author: Nik Everett Date: Fri Apr 22 17:45:40 2016 -0400 Don't finish early if the primary finishes early We use a "fake" pending shard that we resolve when the replicas have all started. commit 0fc045b56e1e02a48c30383ac50a281d5af7e0b6 Author: Nik Everett Date: Fri Apr 22 17:30:06 2016 -0400 Make performOnPrimary asyncS Instead of returning Tuple it returns ReplicaRequest and takes a ActionListener as an argument. We call the listener immediately to preserve backwards compatibility for now. commit 80119b9a26ede96a865af45904c3ac69d5b19b59 Author: Nik Everett Date: Fri Apr 22 16:51:53 2016 -0400 Factor out common code in shardOperationOnPrimary commit 0642083676702618f900fa842c08802a04c1a53e Author: Nik Everett Date: Fri Apr 22 16:32:29 2016 -0400 Factor out common code from shardOperationOnReplica commit 8bdc415fedaaa9f2d0c555590a13ec4699a7c3f7 Author: Nik Everett Date: Fri Apr 22 16:23:28 2016 -0400 Create ReplicatedMutationRequest Superclass for index, delete, and bulkShard requests. commit 0f8fa846a2822c4293df32fed18c9b99660b39ff Author: Nik Everett Date: Fri Apr 22 16:10:30 2016 -0400 Create TransportReplicatedMutationAction It is the superclass of replication actions that mutate data: index, delete, and shardBulk. 
shardFlush and shardRefresh are replication actions but they do not extend TransportReplicatedMutationAction because they don't change the data, only shuffle it around. --- .../resources/checkstyle_suppressions.xml | 2 - .../action/DocWriteResponse.java | 27 +- .../indices/flush/TransportFlushAction.java | 2 +- .../flush/TransportShardFlushAction.java | 10 +- .../refresh/TransportRefreshAction.java | 2 +- .../refresh/TransportShardRefreshAction.java | 20 +- .../action/bulk/BulkRequest.java | 38 +-- .../action/bulk/BulkRequestBuilder.java | 14 +- .../action/bulk/BulkShardRequest.java | 25 +- .../action/bulk/BulkShardResponse.java | 20 +- .../action/bulk/TransportBulkAction.java | 3 +- .../action/bulk/TransportShardBulkAction.java | 50 +-- .../action/delete/DeleteRequest.java | 21 +- .../action/delete/DeleteRequestBuilder.java | 14 +- .../action/delete/TransportDeleteAction.java | 37 +-- .../action/index/IndexRequest.java | 21 +- .../action/index/IndexRequestBuilder.java | 14 +- .../action/index/TransportIndexAction.java | 42 +-- .../action/ingest/IngestActionFilter.java | 2 +- .../action/support/WriteRequest.java | 109 +++++++ .../action/support/WriteRequestBuilder.java | 50 +++ .../action/support/WriteResponse.java | 40 +++ .../replication/ReplicatedWriteRequest.java | 72 +++++ .../replication/ReplicationOperation.java | 105 ++++--- .../replication/ReplicationRequest.java | 6 +- .../replication}/ReplicationResponse.java | 10 +- .../TransportBroadcastReplicationAction.java | 1 - .../TransportReplicationAction.java | 147 +++++---- .../replication/TransportWriteAction.java | 227 ++++++++++++++ .../action/update/TransportUpdateAction.java | 7 +- .../action/update/UpdateHelper.java | 7 +- .../action/update/UpdateRequest.java | 25 +- .../action/update/UpdateRequestBuilder.java | 17 +- .../common/settings/IndexScopedSettings.java | 1 + .../elasticsearch/index/IndexSettings.java | 21 ++ .../elasticsearch/index/engine/Engine.java | 7 + .../index/engine/EngineConfig.java | 
16 +- .../index/engine/InternalEngine.java | 4 + .../index/engine/ShadowEngine.java | 4 +- .../elasticsearch/index/shard/IndexShard.java | 38 ++- .../index/shard/RefreshListeners.java | 208 +++++++++++++ .../index/shard/ShadowIndexShard.java | 13 + .../index/translog/Translog.java | 15 + .../index/translog/TranslogWriter.java | 1 + .../rest/action/bulk/RestBulkAction.java | 2 +- .../rest/action/delete/RestDeleteAction.java | 2 +- .../rest/action/index/RestIndexAction.java | 2 +- .../rest/action/update/RestUpdateAction.java | 2 +- .../action/bulk/BulkRequestTests.java | 17 +- .../action/bulk/BulkShardRequestTests.java | 7 +- .../BroadcastReplicationTests.java | 1 - .../ReplicationOperationTests.java | 67 ++-- .../TransportReplicationActionTests.java | 62 ++-- .../TransportWriteActionTests.java | 190 ++++++++++++ .../elasticsearch/aliases/IndexAliasesIT.java | 14 +- .../cluster/allocation/ClusterRerouteIT.java | 3 +- .../elasticsearch/document/ShardInfoIT.java | 2 +- .../index/WaitUntilRefreshIT.java | 217 +++++++++++++ .../index/engine/InternalEngineTests.java | 35 ++- .../index/engine/ShadowEngineTests.java | 28 +- ...lFieldMapperPositionIncrementGapTests.java | 3 +- .../index/shard/RefreshListenersTests.java | 292 ++++++++++++++++++ .../index/translog/TranslogTests.java | 24 ++ .../elasticsearch/routing/AliasRoutingIT.java | 29 +- .../routing/SimpleRoutingIT.java | 28 +- .../search/child/ChildQuerySearchIT.java | 8 +- .../suggest/CompletionSuggestSearch2xIT.java | 25 +- docs/reference/docs.asciidoc | 4 + docs/reference/docs/bulk.asciidoc | 8 +- docs/reference/docs/delete.asciidoc | 8 +- docs/reference/docs/index_.asciidoc | 15 +- docs/reference/docs/refresh.asciidoc | 109 +++++++ docs/reference/docs/update.asciidoc | 5 +- docs/reference/index-modules.asciidoc | 5 + .../migration/migrate_5_0/docs.asciidoc | 10 + .../migration/migrate_5_0/java.asciidoc | 5 + .../elasticsearch/messy/tests/BulkTests.java | 9 +- .../percolator/PercolatorIT.java | 39 +-- 
.../resources/rest-api-spec/api/bulk.json | 5 +- .../resources/rest-api-spec/api/delete.json | 5 +- .../resources/rest-api-spec/api/index.json | 5 +- .../resources/rest-api-spec/api/update.json | 5 +- .../rest-api-spec/test/bulk/50_refresh.yaml | 48 +++ .../rest-api-spec/test/create/60_refresh.yaml | 42 ++- .../rest-api-spec/test/delete/50_refresh.yaml | 76 ++++- .../rest-api-spec/test/index/60_refresh.yaml | 42 ++- .../rest-api-spec/test/update/60_refresh.yaml | 70 ++++- 87 files changed, 2542 insertions(+), 548 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/action/support/WriteRequest.java create mode 100644 core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java create mode 100644 core/src/main/java/org/elasticsearch/action/support/WriteResponse.java create mode 100644 core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java rename core/src/main/java/org/elasticsearch/action/{ => support/replication}/ReplicationResponse.java (96%) create mode 100644 core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java create mode 100644 core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java create mode 100644 core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java create mode 100644 core/src/test/java/org/elasticsearch/index/WaitUntilRefreshIT.java create mode 100644 core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java create mode 100644 docs/reference/docs/refresh.asciidoc create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yaml diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 0245cda2fe1..3c5caf2d4e2 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -19,7 +19,6 @@ - @@ -101,7 +100,6 @@ - 
diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 4df43b75401..0925c744144 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -18,10 +18,15 @@ */ package org.elasticsearch.action; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.WriteResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.StatusToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; @@ -30,12 +35,13 @@ import java.io.IOException; /** * A base class for the response of a write operation that involves a single doc */ -public abstract class DocWriteResponse extends ReplicationResponse implements StatusToXContent { +public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContent { private ShardId shardId; private String id; private String type; private long version; + private boolean forcedRefresh; public DocWriteResponse(ShardId shardId, String type, String id, long version) { this.shardId = shardId; @@ -84,6 +90,20 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St return this.version; } + /** + * Did this request force a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to + * {@link RefreshPolicy#IMMEDIATE} will always return true for this. 
Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will + * only return true here if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). + */ + public boolean forcedRefresh() { + return forcedRefresh; + } + + @Override + public void setForcedRefresh(boolean forcedRefresh) { + this.forcedRefresh = forcedRefresh; + } + /** returns the rest status for this response (based on {@link ShardInfo#status()} */ public RestStatus status() { return getShardInfo().status(); @@ -97,6 +117,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St type = in.readString(); id = in.readString(); version = in.readZLong(); + forcedRefresh = in.readBoolean(); } @Override @@ -106,6 +127,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St out.writeString(type); out.writeString(id); out.writeZLong(version); + out.writeBoolean(forcedRefresh); } static final class Fields { @@ -121,7 +143,8 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St builder.field(Fields._INDEX, shardId.getIndexName()) .field(Fields._TYPE, type) .field(Fields._ID, id) - .field(Fields._VERSION, version); + .field(Fields._VERSION, version) + .field("forced_refresh", forcedRefresh); shardInfo.toXContent(builder, params); return builder; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 8bb124d8fc4..a29918b438e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.ShardOperationFailedException; import 
org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 7e750b97677..82fb6d70ca4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -19,14 +19,13 @@ package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; @@ -55,18 +54,19 @@ public class TransportShardFlushAction extends TransportReplicationAction shardOperationOnPrimary(ShardFlushRequest shardRequest) { + protected PrimaryResult shardOperationOnPrimary(ShardFlushRequest shardRequest) { IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id()); indexShard.flush(shardRequest.getRequest()); logger.trace("{} flush 
request executed on primary", indexShard.shardId()); - return new Tuple<>(new ReplicationResponse(), shardRequest); + return new PrimaryResult(shardRequest, new ReplicationResponse()); } @Override - protected void shardOperationOnReplica(ShardFlushRequest request) { + protected ReplicaResult shardOperationOnReplica(ShardFlushRequest request) { IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); indexShard.flush(request.getRequest()); logger.trace("{} flush request executed on replica", indexShard.shardId()); + return new ReplicaResult(); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index 34bf39daabd..ac64e276778 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -19,10 +19,10 @@ package org.elasticsearch.action.admin.indices.refresh; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.BasicReplicationRequest; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 0670c1f3cc6..d7d0c289953 100644 --- 
a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -19,15 +19,14 @@ package org.elasticsearch.action.admin.indices.refresh; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.BasicReplicationRequest; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; @@ -36,10 +35,8 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -/** - * - */ -public class TransportShardRefreshAction extends TransportReplicationAction { +public class TransportShardRefreshAction + extends TransportReplicationAction { public static final String NAME = RefreshAction.NAME + "[s]"; @@ -47,8 +44,8 @@ public class TransportShardRefreshAction extends TransportReplicationAction shardOperationOnPrimary(BasicReplicationRequest shardRequest) { + protected PrimaryResult shardOperationOnPrimary(BasicReplicationRequest shardRequest) { IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id()); indexShard.refresh("api"); logger.trace("{} refresh request executed on primary", indexShard.shardId()); - return new Tuple<>(new 
ReplicationResponse(), shardRequest); + return new PrimaryResult(shardRequest, new ReplicationResponse()); } @Override - protected void shardOperationOnReplica(BasicReplicationRequest request) { + protected ReplicaResult shardOperationOnReplica(BasicReplicationRequest request) { final ShardId shardId = request.shardId(); IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); indexShard.refresh("api"); logger.trace("{} refresh request executed on replica", indexShard.shardId()); + return new ReplicaResult(); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index a5775656475..85d7147ada0 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -54,16 +55,21 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * Note that we only support refresh on the bulk request not per item. * @see org.elasticsearch.client.Client#bulk(BulkRequest) */ -public class BulkRequest extends ActionRequest implements CompositeIndicesRequest { +public class BulkRequest extends ActionRequest implements CompositeIndicesRequest, WriteRequest { private static final int REQUEST_OVERHEAD = 50; + /** + * Requests that are part of this request. 
It is only possible to add things that are both {@link ActionRequest}s and + * {@link WriteRequest}s to this but java doesn't support syntax to declare that everything in the array has both types so we declare + * the one with the least casts. + */ final List> requests = new ArrayList<>(); List payloads = null; protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT; - private boolean refresh = false; + private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; private long sizeInBytes = 0; @@ -437,18 +443,15 @@ public class BulkRequest extends ActionRequest implements Composite return this.consistencyLevel; } - /** - * Should a refresh be executed post this bulk operation causing the operations to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. - */ - public BulkRequest refresh(boolean refresh) { - this.refresh = refresh; + @Override + public BulkRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; return this; } - public boolean refresh() { - return this.refresh; + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; } /** @@ -483,7 +486,7 @@ public class BulkRequest extends ActionRequest implements Composite * @return Whether this bulk request contains index request with an ingest pipeline enabled. 
*/ public boolean hasIndexRequestsWithPipelines() { - for (ActionRequest actionRequest : requests) { + for (ActionRequest actionRequest : requests) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { @@ -503,10 +506,9 @@ public class BulkRequest extends ActionRequest implements Composite } for (ActionRequest request : requests) { // We first check if refresh has been set - if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) || - (request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) || - (request instanceof IndexRequest && ((IndexRequest)request).refresh())) { - validationException = addValidationError("Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", validationException); + if (((WriteRequest) request).getRefreshPolicy() != RefreshPolicy.NONE) { + validationException = addValidationError( + "RefreshPolicy is not supported on an item request. 
Set it on the BulkRequest instead.", validationException); } ActionRequestValidationException ex = request.validate(); if (ex != null) { @@ -541,7 +543,7 @@ public class BulkRequest extends ActionRequest implements Composite requests.add(request); } } - refresh = in.readBoolean(); + refreshPolicy = RefreshPolicy.readFrom(in); timeout = TimeValue.readTimeValue(in); } @@ -560,7 +562,7 @@ public class BulkRequest extends ActionRequest implements Composite } request.writeTo(out); } - out.writeBoolean(refresh); + refreshPolicy.writeTo(out); timeout.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index 3744055d26c..4f2b7aa702e 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteRequestBuilder; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -35,7 +36,8 @@ import org.elasticsearch.common.unit.TimeValue; * A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes * it in a single batch. */ -public class BulkRequestBuilder extends ActionRequestBuilder { +public class BulkRequestBuilder extends ActionRequestBuilder + implements WriteRequestBuilder { public BulkRequestBuilder(ElasticsearchClient client, BulkAction action) { super(client, action, new BulkRequest()); @@ -116,16 +118,6 @@ public class BulkRequestBuilder extends ActionRequestBuildertrue. Defaults - * to false. 
- */ - public BulkRequestBuilder setRefresh(boolean refresh) { - request.refresh(refresh); - return this; - } - /** * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. */ diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index 874789e8d61..321b7e2a8e5 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -31,23 +31,17 @@ import java.util.List; /** * */ -public class BulkShardRequest extends ReplicationRequest { +public class BulkShardRequest extends ReplicatedWriteRequest { private BulkItemRequest[] items; - private boolean refresh; - public BulkShardRequest() { } - BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, boolean refresh, BulkItemRequest[] items) { + BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) { super(shardId); this.items = items; - this.refresh = refresh; - } - - boolean refresh() { - return this.refresh; + setRefreshPolicy(refreshPolicy); } BulkItemRequest[] items() { @@ -77,7 +71,6 @@ public class BulkShardRequest extends ReplicationRequest { out.writeBoolean(false); } } - out.writeBoolean(refresh); } @Override @@ -89,7 +82,6 @@ public class BulkShardRequest extends ReplicationRequest { items[i] = BulkItemRequest.readBulkItem(in); } } - refresh = in.readBoolean(); } @Override @@ -97,8 +89,15 @@ public class BulkShardRequest extends ReplicationRequest { // This is 
included in error messages so we'll try to make it somewhat user friendly. StringBuilder b = new StringBuilder("BulkShardRequest to ["); b.append(index).append("] containing [").append(items.length).append("] requests"); - if (refresh) { + switch (getRefreshPolicy()) { + case IMMEDIATE: b.append(" and a refresh"); + break; + case WAIT_UNTIL: + b.append(" blocking until refresh"); + break; + case NONE: + break; } return b.toString(); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index 76c80a9b064..22260181bb1 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -19,7 +19,9 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.ReplicationResponse; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.support.WriteResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -29,7 +31,7 @@ import java.io.IOException; /** * */ -public class BulkShardResponse extends ReplicationResponse { +public class BulkShardResponse extends ReplicationResponse implements WriteResponse { private ShardId shardId; private BulkItemResponse[] responses; @@ -50,6 +52,20 @@ public class BulkShardResponse extends ReplicationResponse { return responses; } + @Override + public void setForcedRefresh(boolean forcedRefresh) { + /* + * Each DocWriteResponse already has a location for whether or not it forced a refresh so we just set that information on the + * response. 
+ */ + for (BulkItemResponse response : responses) { + DocWriteResponse r = response.getResponse(); + if (r != null) { + r.setForcedRefresh(forcedRefresh); + } + } + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 667e691f6c8..4cbebd0739a 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -344,7 +344,8 @@ public class TransportBulkAction extends HandledTransportAction> entry : requestsByShard.entrySet()) { final ShardId shardId = entry.getKey(); final List requests = entry.getValue(); - BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.refresh(), requests.toArray(new BulkItemRequest[requests.size()])); + BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.getRefreshPolicy(), + requests.toArray(new BulkItemRequest[requests.size()])); bulkShardRequest.consistencyLevel(bulkRequest.consistencyLevel()); bulkShardRequest.timeout(bulkRequest.timeout()); if (task != null) { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index a2f642374b7..4ad1136e668 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -30,7 +30,8 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.ReplicationRequest; -import 
org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.action.support.replication.TransportWriteAction; +import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; @@ -53,6 +54,7 @@ import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; @@ -67,7 +69,7 @@ import static org.elasticsearch.action.support.replication.ReplicationOperation. /** * Performs the index operation. */ -public class TransportShardBulkAction extends TransportReplicationAction { +public class TransportShardBulkAction extends TransportWriteAction { private final static String OP_TYPE_UPDATE = "update"; private final static String OP_TYPE_DELETE = "delete"; @@ -83,9 +85,8 @@ public class TransportShardBulkAction extends TransportReplicationAction shardOperationOnPrimary(BulkShardRequest request) { + protected WriteResult onPrimaryShard(BulkShardRequest request, IndexShard indexShard) throws Exception { ShardId shardId = request.shardId(); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - final IndexShard indexShard = indexService.getShard(shardId.getId()); final IndexMetaData metaData = indexService.getIndexSettings().getIndexMetaData(); long[] preVersions = new long[request.items().length]; @@ -121,13 +121,13 @@ public class TransportShardBulkAction extends TransportReplicationAction(new BulkShardResponse(request.shardId(), responses), request); + BulkShardResponse response = new 
BulkShardResponse(request.shardId(), responses); + return new WriteResult<>(response, location); } private Translog.Location handleItem(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { @@ -154,9 +154,9 @@ public class TransportShardBulkAction extends TransportReplicationAction result = shardIndexOperation(request, indexRequest, metaData, indexShard, true); - location = locationToSync(location, result.location); + location = locationToSync(location, result.getLocation()); // add the response - IndexResponse indexResponse = result.response(); + IndexResponse indexResponse = result.getResponse(); setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse)); } catch (Throwable e) { // rethrow the failure if we are going to retry on primary and let parent failure to handle it @@ -197,8 +197,8 @@ public class TransportShardBulkAction extends TransportReplicationAction writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); - DeleteResponse deleteResponse = writeResult.response(); - location = locationToSync(location, writeResult.location); + DeleteResponse deleteResponse = writeResult.getResponse(); + location = locationToSync(location, writeResult.getLocation()); setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse)); } catch (Throwable e) { // rethrow the failure if we are going to retry on primary and let parent failure to handle it @@ -237,16 +237,17 @@ public class TransportShardBulkAction extends TransportReplicationAction result = updateResult.writeResult; IndexRequest indexRequest = updateResult.request(); BytesReference indexSourceAsBytes = indexRequest.source(); // add the response - IndexResponse indexResponse = result.response(); + IndexResponse indexResponse = result.getResponse(); UpdateResponse updateResponse 
= new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.isCreated()); if (updateRequest.fields() != null && updateRequest.fields().length > 0) { Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); @@ -256,8 +257,9 @@ public class TransportShardBulkAction extends TransportReplicationAction writeResult = updateResult.writeResult; - DeleteResponse response = writeResult.response(); + DeleteResponse response = writeResult.getResponse(); DeleteRequest deleteRequest = updateResult.request(); updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false); updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null)); @@ -326,11 +328,14 @@ public class TransportShardBulkAction extends TransportReplicationAction shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, IndexMetaData metaData, + IndexShard indexShard, boolean processed) throws Throwable { MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); if (!processed) { @@ -431,12 +436,8 @@ public class TransportShardBulkAction extends TransportReplicationAction implements DocumentRequest { +public class DeleteRequest extends ReplicatedWriteRequest implements DocumentRequest { private String type; private String id; @@ -51,7 +51,6 @@ public class DeleteRequest extends ReplicationRequest implements private String routing; @Nullable private String parent; - private boolean refresh; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; @@ -165,20 +164,6 @@ public class DeleteRequest extends ReplicationRequest implements return this.routing; } - /** - * Should a refresh 
be executed post this index operation causing the operation to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. - */ - public DeleteRequest refresh(boolean refresh) { - this.refresh = refresh; - return this; - } - - public boolean refresh() { - return this.refresh; - } - /** * Sets the version, which will cause the delete operation to only be performed if a matching * version exists and no changes happened on the doc since then. @@ -208,7 +193,6 @@ public class DeleteRequest extends ReplicationRequest implements id = in.readString(); routing = in.readOptionalString(); parent = in.readOptionalString(); - refresh = in.readBoolean(); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); } @@ -220,7 +204,6 @@ public class DeleteRequest extends ReplicationRequest implements out.writeString(id); out.writeOptionalString(routing()); out.writeOptionalString(parent()); - out.writeBoolean(refresh); out.writeLong(version); out.writeByte(versionType.getValue()); } diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java index 0ce907bac1d..b9b0f95f8de 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.delete; +import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; @@ -27,7 +28,8 @@ import org.elasticsearch.index.VersionType; /** * A delete document action request builder. 
*/ -public class DeleteRequestBuilder extends ReplicationRequestBuilder { +public class DeleteRequestBuilder extends ReplicationRequestBuilder + implements WriteRequestBuilder { public DeleteRequestBuilder(ElasticsearchClient client, DeleteAction action) { super(client, action, new DeleteRequest()); @@ -71,16 +73,6 @@ public class DeleteRequestBuilder extends ReplicationRequestBuildertrue. Defaults - * to false. - */ - public DeleteRequestBuilder setRefresh(boolean refresh) { - request.refresh(refresh); - return this; - } - /** * Sets the version, which will cause the delete operation to only be performed if a matching * version exists and no changes happened on the doc since then. diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 62d46766c47..beced23c338 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -27,19 +27,19 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; @@ -49,7 +49,7 @@ import org.elasticsearch.transport.TransportService; /** * Performs the delete operation. */ -public class TransportDeleteAction extends TransportReplicationAction { +public class TransportDeleteAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; @@ -60,9 +60,8 @@ public class TransportDeleteAction extends TransportReplicationAction shardOperationOnPrimary(DeleteRequest request) { - IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); - final WriteResult result = executeDeleteRequestOnPrimary(request, indexShard); - processAfterWrite(request.refresh(), indexShard, result.location); - return new Tuple<>(result.response, request); + protected WriteResult onPrimaryShard(DeleteRequest request, IndexShard indexShard) { + return executeDeleteRequestOnPrimary(request, indexShard); + } + + @Override + protected Location onReplicaShard(DeleteRequest request, IndexShard indexShard) { + return executeDeleteRequestOnReplica(request, indexShard).getTranslogLocation(); } public static WriteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard indexShard) { @@ -134,9 +135,8 @@ public class TransportDeleteAction extends TransportReplicationAction( - new DeleteResponse(indexShard.shardId(), request.type(), request.id(), delete.version(), delete.found()), - delete.getTranslogLocation()); + DeleteResponse response = new DeleteResponse(indexShard.shardId(), request.type(), request.id(), delete.version(), 
delete.found()); + return new WriteResult<>(response, delete.getTranslogLocation()); } public static Engine.Delete executeDeleteRequestOnReplica(DeleteRequest request, IndexShard indexShard) { @@ -144,13 +144,4 @@ public class TransportDeleteAction extends TransportReplicationAction implements DocumentRequest { +public class IndexRequest extends ReplicatedWriteRequest implements DocumentRequest { /** * Operation type controls if the type of the index operation. @@ -145,7 +145,6 @@ public class IndexRequest extends ReplicationRequest implements Do private OpType opType = OpType.INDEX; - private boolean refresh = false; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; @@ -542,20 +541,6 @@ public class IndexRequest extends ReplicationRequest implements Do return this.opType; } - /** - * Should a refresh be executed post this index operation causing the operation to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. - */ - public IndexRequest refresh(boolean refresh) { - this.refresh = refresh; - return this; - } - - public boolean refresh() { - return this.refresh; - } - /** * Sets the version, which will cause the index operation to only be performed if a matching * version exists and no changes happened on the doc since then. 
@@ -652,7 +637,6 @@ public class IndexRequest extends ReplicationRequest implements Do source = in.readBytesReference(); opType = OpType.fromId(in.readByte()); - refresh = in.readBoolean(); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); pipeline = in.readOptionalString(); @@ -674,7 +658,6 @@ public class IndexRequest extends ReplicationRequest implements Do } out.writeBytesReference(source); out.writeByte(opType.id()); - out.writeBoolean(refresh); out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 4116755e4eb..20587bf0ea9 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.index; +import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; @@ -33,7 +34,8 @@ import java.util.Map; /** * An index document action request builder. */ -public class IndexRequestBuilder extends ReplicationRequestBuilder { +public class IndexRequestBuilder extends ReplicationRequestBuilder + implements WriteRequestBuilder { public IndexRequestBuilder(ElasticsearchClient client, IndexAction action) { super(client, action, new IndexRequest()); @@ -220,16 +222,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuildertrue. Defaults - * to false. 
- */ - public IndexRequestBuilder setRefresh(boolean refresh) { - request.refresh(refresh); - return this; - } - /** * Sets the version, which will cause the index operation to only be performed if a matching * version exists and no changes happened on the doc since then. diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 10e18c82b86..00be64757ae 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.ReplicationOperation; -import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -36,16 +36,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import 
org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; @@ -62,7 +60,7 @@ import org.elasticsearch.transport.TransportService; *
  • allowIdGeneration: If the id is set not, should it be generated. Defaults to true. * */ -public class TransportIndexAction extends TransportReplicationAction { +public class TransportIndexAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final boolean allowIdGeneration; @@ -78,7 +76,7 @@ public class TransportIndexAction extends TransportReplicationAction shardOperationOnPrimary(IndexRequest request) throws Exception { - - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - - final WriteResult result = executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction); - - final IndexResponse response = result.response; - final Translog.Location location = result.location; - processAfterWrite(request.refresh(), indexShard, location); - return new Tuple<>(response, request); + protected WriteResult onPrimaryShard(IndexRequest request, IndexShard indexShard) throws Exception { + return executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction); } @Override - protected void shardOperationOnReplica(IndexRequest request) { - final ShardId shardId = request.shardId(); - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - IndexShard indexShard = indexService.getShard(shardId.id()); - final Engine.Index operation = executeIndexRequestOnReplica(request, indexShard); - processAfterWrite(request.refresh(), indexShard, operation.getTranslogLocation()); + protected Location onReplicaShard(IndexRequest request, IndexShard indexShard) { + return executeIndexRequestOnReplica(request, indexShard).getTranslogLocation(); } /** @@ -188,11 +173,8 @@ public class TransportIndexAction extends TransportReplicationAction executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Exception { + public static WriteResult 
executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, + MappingUpdatedAction mappingUpdatedAction) throws Exception { Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard); Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); final ShardId shardId = indexShard.shardId(); @@ -214,8 +196,8 @@ public class TransportIndexAction extends TransportReplicationAction(new IndexResponse(shardId, request.type(), request.id(), request.version(), created), operation.getTranslogLocation()); + IndexResponse response = new IndexResponse(shardId, request.type(), request.id(), request.version(), created); + return new WriteResult<>(response, operation.getTranslogLocation()); } - } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java index 1eb9337c814..850cac040dd 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java @@ -162,7 +162,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio return bulkRequest; } else { BulkRequest modifiedBulkRequest = new BulkRequest(); - modifiedBulkRequest.refresh(bulkRequest.refresh()); + modifiedBulkRequest.setRefreshPolicy(bulkRequest.getRefreshPolicy()); modifiedBulkRequest.consistencyLevel(bulkRequest.consistencyLevel()); modifiedBulkRequest.timeout(bulkRequest.timeout()); diff --git a/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java b/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java new file mode 100644 index 00000000000..6379a4fb259 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * Interface implemented by requests that modify the documents in an index like {@link IndexRequest}, {@link UpdateRequest}, and + * {@link BulkRequest}. Rather than implement this directly most implementers should extend {@link ReplicatedWriteRequest}. + */ +public interface WriteRequest> extends Streamable { + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default). 
+ */ + R setRefreshPolicy(RefreshPolicy refreshPolicy); + + /** + * Parse the refresh policy from a string, only modifying it if the string is non-null. Convenient to use with request parsing. + */ + @SuppressWarnings("unchecked") + default R setRefreshPolicy(String refreshPolicy) { + if (refreshPolicy != null) { + setRefreshPolicy(RefreshPolicy.parse(refreshPolicy)); + } + return (R) this; + } + + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed and ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default). + */ + RefreshPolicy getRefreshPolicy(); + + ActionRequestValidationException validate(); + + enum RefreshPolicy implements Writeable { + /** + * Don't refresh after this request. The default. + */ + NONE, + /** + * Force a refresh as part of this request. This refresh policy does not scale for high indexing or search throughput but is useful + * to present a consistent view for indices with very low traffic. And it is wonderful for tests! + */ + IMMEDIATE, + /** + * Leave this request open until a refresh has made the contents of this request visible to search. This refresh policy is + * compatible with high indexing and search throughput but it causes the request to wait to reply until a refresh occurs. + */ + WAIT_UNTIL; + + /** + * Parse the string representation of a refresh policy, usually from a request parameter. + */ + public static RefreshPolicy parse(String string) { + switch (string) { + case "false": + return NONE; + /* + * Empty string is IMMEDIATE because that makes "POST /test/test/1?refresh" perform a refresh which reads well and is what folks + * are used to. 
+ */ + case "": + case "true": + return IMMEDIATE; + case "wait_for": + return WAIT_UNTIL; + } + throw new IllegalArgumentException("Unknown value for refresh: [" + string + "]."); + } + + public static RefreshPolicy readFrom(StreamInput in) throws IOException { + return RefreshPolicy.values()[in.readByte()]; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte) ordinal()); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java new file mode 100644 index 00000000000..a87fd043452 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.Version; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; + +public interface WriteRequestBuilder> { + WriteRequest request(); + + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default). + */ + @SuppressWarnings("unchecked") + default B setRefreshPolicy(RefreshPolicy refreshPolicy) { + request().setRefreshPolicy(refreshPolicy); + return (B) this; + } + + /** + * If set to true then this request will force an immediate refresh. Backwards compatibility layer for Elasticsearch's old + * {@code setRefresh} calls. + * + * @deprecated use {@link #setRefreshPolicy(RefreshPolicy)} with {@link RefreshPolicy#IMMEDIATE} or {@link RefreshPolicy#NONE} instead. + * Will be removed in 6.0. + */ + @Deprecated + default B setRefresh(boolean refresh) { + assert Version.CURRENT.major < 6 : "Remove setRefresh(boolean) in 6.0"; + return setRefreshPolicy(refresh ? RefreshPolicy.IMMEDIATE : RefreshPolicy.NONE); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/WriteResponse.java b/core/src/main/java/org/elasticsearch/action/support/WriteResponse.java new file mode 100644 index 00000000000..07f5ea695d9 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/WriteResponse.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.index.IndexSettings; + +/** + * Interface implemented by responses for actions that modify the documents in an index like {@link IndexResponse}, {@link UpdateResponse}, + * and {@link BulkResponse}. Rather than implement this directly most implementers should extend {@link DocWriteResponse}. + */ +public interface WriteResponse { + /** + * Mark the response as having forced a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to + * {@link RefreshPolicy#IMMEDIATE} should always mark this as true. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will only + * set this to true if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). 
+ */ + public abstract void setForcedRefresh(boolean forcedRefresh); +} diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java new file mode 100644 index 00000000000..fa02dac9e1e --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicatedWriteRequest.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.action.bulk.BulkShardRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; + +/** + * Requests that are both {@linkplain ReplicationRequest}s (run on a shard's primary first, then the replica) and {@linkplain WriteRequest} + * (modify documents on a shard), for example {@link BulkShardRequest}, {@link IndexRequest}, and {@link DeleteRequest}. 
+ */ +public abstract class ReplicatedWriteRequest> extends ReplicationRequest implements WriteRequest { + private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; + + /** + * Constructor for deserialization. + */ + public ReplicatedWriteRequest() { + } + + public ReplicatedWriteRequest(ShardId shardId) { + super(shardId); + } + + @Override + @SuppressWarnings("unchecked") + public R setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return (R) this; + } + + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + refreshPolicy = RefreshPolicy.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + refreshPolicy.writeTo(out); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 1f7313c1943..8442e705257 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.TransportActions; @@ -29,7 +28,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; -import 
org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -47,28 +45,41 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Supplier; -public class ReplicationOperation, ReplicaRequest extends ReplicationRequest, - Response extends ReplicationResponse> { +public class ReplicationOperation< + Request extends ReplicationRequest, + ReplicaRequest extends ReplicationRequest, + PrimaryResultT extends ReplicationOperation.PrimaryResult + > { final private ESLogger logger; final private Request request; final private Supplier clusterStateSupplier; final private String opType; final private AtomicInteger totalShards = new AtomicInteger(); + /** + * The number of pending sub-operations in this operation. This is incremented when the following operations start and decremented when + * they complete: + *
      + *
    • The operation on the primary
    • + *
    • The operation on each replica
    • + *
    • Coordination of the operation as a whole. This prevents the operation from terminating early if we haven't started any replica + * operations and the primary finishes.
    • + *
    + */ final private AtomicInteger pendingShards = new AtomicInteger(); final private AtomicInteger successfulShards = new AtomicInteger(); final private boolean executeOnReplicas; final private boolean checkWriteConsistency; - final private Primary primary; + final private Primary primary; final private Replicas replicasProxy; final private AtomicBoolean finished = new AtomicBoolean(); - final protected ActionListener finalResponseListener; + final protected ActionListener resultListener; - private volatile Response finalResponse = null; + private volatile PrimaryResultT primaryResult = null; private final List shardReplicaFailures = Collections.synchronizedList(new ArrayList<>()); - ReplicationOperation(Request request, Primary primary, - ActionListener listener, + ReplicationOperation(Request request, Primary primary, + ActionListener listener, boolean executeOnReplicas, boolean checkWriteConsistency, Replicas replicas, Supplier clusterStateSupplier, ESLogger logger, String opType) { @@ -76,7 +87,7 @@ public class ReplicationOperation, R this.executeOnReplicas = executeOnReplicas; this.replicasProxy = replicas; this.primary = primary; - this.finalResponseListener = listener; + this.resultListener = listener; this.logger = logger; this.request = request; this.clusterStateSupplier = clusterStateSupplier; @@ -85,28 +96,27 @@ public class ReplicationOperation, R void execute() throws Exception { final String writeConsistencyFailure = checkWriteConsistency ? 
checkWriteConsistency() : null; - final ShardId shardId = primary.routingEntry().shardId(); + final ShardRouting primaryRouting = primary.routingEntry(); + final ShardId primaryId = primaryRouting.shardId(); if (writeConsistencyFailure != null) { - finishAsFailed(new UnavailableShardsException(shardId, + finishAsFailed(new UnavailableShardsException(primaryId, "{} Timeout: [{}], request: [{}]", writeConsistencyFailure, request.timeout(), request)); return; } totalShards.incrementAndGet(); - pendingShards.incrementAndGet(); // increase by 1 until we finish all primary coordination - Tuple primaryResponse = primary.perform(request); - successfulShards.incrementAndGet(); // mark primary as successful - finalResponse = primaryResponse.v1(); - ReplicaRequest replicaRequest = primaryResponse.v2(); + pendingShards.incrementAndGet(); + primaryResult = primary.perform(request); + final ReplicaRequest replicaRequest = primaryResult.replicaRequest(); assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; if (logger.isTraceEnabled()) { - logger.trace("[{}] op [{}] completed on primary for request [{}]", shardId, opType, request); + logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); } // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics. // we have to make sure that every operation indexed into the primary after recovery start will also be replicated // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then. 
// If the index gets deleted after primary operation, we skip replication - List shards = getShards(shardId, clusterStateSupplier.get()); + final List shards = getShards(primaryId, clusterStateSupplier.get()); final String localNodeId = primary.routingEntry().currentNodeId(); for (final ShardRouting shard : shards) { if (executeOnReplicas == false || shard.unassigned()) { @@ -125,8 +135,8 @@ public class ReplicationOperation, R } } - // decrement pending and finish (if there are no replicas, or those are done) - decPendingAndFinishIfNeeded(); // incremented in the beginning of this method + successfulShards.incrementAndGet(); + decPendingAndFinishIfNeeded(); } private void performOnReplica(final ShardRouting shard, final ReplicaRequest replicaRequest) { @@ -241,19 +251,19 @@ public class ReplicationOperation, R failuresArray = new ReplicationResponse.ShardInfo.Failure[shardReplicaFailures.size()]; shardReplicaFailures.toArray(failuresArray); } - finalResponse.setShardInfo(new ReplicationResponse.ShardInfo( + primaryResult.setShardInfo(new ReplicationResponse.ShardInfo( totalShards.get(), successfulShards.get(), failuresArray ) ); - finalResponseListener.onResponse(finalResponse); + resultListener.onResponse(primaryResult); } } private void finishAsFailed(Throwable throwable) { if (finished.compareAndSet(false, true)) { - finalResponseListener.onFailure(throwable); + resultListener.onFailure(throwable); } } @@ -284,22 +294,31 @@ public class ReplicationOperation, R } - interface Primary, ReplicaRequest extends ReplicationRequest, - Response extends ReplicationResponse> { + interface Primary< + Request extends ReplicationRequest, + ReplicaRequest extends ReplicationRequest, + PrimaryResultT extends PrimaryResult + > { - /** routing entry for this primary */ + /** + * routing entry for this primary + */ ShardRouting routingEntry(); - /** fail the primary, typically due to the fact that the operation has learned the primary has been demoted by the master */ + /** + * 
fail the primary, typically due to the fact that the operation has learned the primary has been demoted by the master + */ void failShard(String message, Throwable throwable); /** - * Performs the given request on this primary + * Performs the given request on this primary. Yes, this returns as soon as it can with the request for the replicas and calls a + * listener when the primary request is completed. Yes, the primary request might complete before the method returns. Yes, it might + * also complete after. Deal with it. * - * @return A tuple containing not null values, as first value the result of the primary operation and as second value - * the request to be executed on the replica shards. + * @param request the request to perform + * @return the request to send to the repicas */ - Tuple perform(Request request) throws Exception; + PrimaryResultT perform(Request request) throws Exception; } @@ -308,19 +327,20 @@ public class ReplicationOperation, R /** * performs the the given request on the specified replica * - * @param replica {@link ShardRouting} of the shard this request should be executed on + * @param replica {@link ShardRouting} of the shard this request should be executed on * @param replicaRequest operation to peform - * @param listener a callback to call once the operation has been complicated, either successfully or with an error. + * @param listener a callback to call once the operation has been complicated, either successfully or with an error. 
*/ void performOn(ShardRouting replica, ReplicaRequest replicaRequest, ActionListener listener); /** * Fail the specified shard, removing it from the current set of active shards - * @param replica shard to fail - * @param primary the primary shard that requested the failure - * @param message a (short) description of the reason - * @param throwable the original exception which caused the ReplicationOperation to request the shard to be failed - * @param onSuccess a callback to call when the shard has been successfully removed from the active set. + * + * @param replica shard to fail + * @param primary the primary shard that requested the failure + * @param message a (short) description of the reason + * @param throwable the original exception which caused the ReplicationOperation to request the shard to be failed + * @param onSuccess a callback to call when the shard has been successfully removed from the active set. * @param onPrimaryDemoted a callback to call when the shard can not be failed because the current primary has been demoted * by the master. 
* @param onIgnoredFailure a callback to call when failing a shard has failed, but it that failure can be safely ignored and the @@ -345,4 +365,11 @@ public class ReplicationOperation, R super(in); } } + + interface PrimaryResult> { + + R replicaRequest(); + + void setShardInfo(ReplicationResponse.ShardInfo shardInfo); + } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 3e88575b717..44c420598b5 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.WriteConsistencyLevel; +import org.elasticsearch.action.admin.indices.refresh.TransportShardRefreshAction; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; @@ -38,7 +40,8 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.action.ValidateActions.addValidationError; /** - * + * Requests that are run on a particular replica, first on the primary and then on the replicas like {@link IndexRequest} or + * {@link TransportShardRefreshAction}. 
*/ public abstract class ReplicationRequest> extends ActionRequest implements IndicesRequest { @@ -65,7 +68,6 @@ public abstract class ReplicationRequest, - ReplicaRequest extends ReplicationRequest, - Response extends ReplicationResponse> extends TransportAction { +public abstract class TransportReplicationAction< + Request extends ReplicationRequest, + ReplicaRequest extends ReplicationRequest, + Response extends ReplicationResponse + > extends TransportAction { final protected TransportService transportService; final protected ClusterService clusterService; @@ -149,17 +149,17 @@ public abstract class TransportReplicationAction shardOperationOnPrimary(Request shardRequest) throws Exception; + protected abstract PrimaryResult shardOperationOnPrimary(Request shardRequest) throws Exception; /** - * Replica operation on nodes with replica copies + * Synchronous replica operation on nodes with replica copies. This is done under the lock form + * {@link #acquireReplicaOperationLock(ShardId, long)}. 
*/ - protected abstract void shardOperationOnReplica(ReplicaRequest shardRequest); + protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest); /** * True if write consistency should be checked for an implementation @@ -198,26 +198,6 @@ public abstract class TransportReplicationAction { - - public final T response; - public final Translog.Location location; - - public WriteResult(T response, Translog.Location location) { - this.response = response; - this.location = location; - } - - @SuppressWarnings("unchecked") - public T response() { - // this sets total, pending and failed to 0 and this is ok, because we will embed this into the replica - // request and not use it - response.setShardInfo(new ReplicationResponse.ShardInfo()); - return (T) response; - } - - } - class OperationTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception { @@ -289,7 +269,17 @@ public abstract class TransportReplicationAction listener = createResponseListener(channel, replicationTask, primaryShardReference); - createReplicatedOperation(request, listener, primaryShardReference, executeOnReplicas).execute(); + createReplicatedOperation(request, new ActionListener() { + @Override + public void onResponse(PrimaryResult result) { + result.respond(listener); + } + + @Override + public void onFailure(Throwable e) { + listener.onFailure(e); + } + }, primaryShardReference, executeOnReplicas).execute(); success = true; } } finally { @@ -299,9 +289,9 @@ public abstract class TransportReplicationAction - createReplicatedOperation(Request request, ActionListener listener, - PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { + protected ReplicationOperation createReplicatedOperation( + Request request, ActionListener listener, + PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { return new 
ReplicationOperation<>(request, primaryShardReference, listener, executeOnReplicas, checkWriteConsistency(), replicasProxy, clusterService::state, logger, actionName ); @@ -339,6 +329,41 @@ public abstract class TransportReplicationAction { + final ReplicaRequest replicaRequest; + final Response finalResponse; + + public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponse) { + this.replicaRequest = replicaRequest; + this.finalResponse = finalResponse; + } + + @Override + public ReplicaRequest replicaRequest() { + return replicaRequest; + } + + @Override + public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) { + finalResponse.setShardInfo(shardInfo); + } + + public void respond(ActionListener listener) { + listener.onResponse(finalResponse); + } + } + + protected class ReplicaResult { + /** + * Public constructor so subclasses can call it. + */ + public ReplicaResult() {} + + public void respond(ActionListener listener) { + listener.onResponse(TransportResponse.Empty.INSTANCE); + } + } + class ReplicaOperationTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception { @@ -426,15 +451,35 @@ public abstract class TransportReplicationAction { + @Override + public void onResponse(Empty response) { if (logger.isTraceEnabled()) { logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), - request); + request); + } + setPhase(task, "finished"); + try { + channel.sendResponse(response); + } catch (Exception e) { + onFailure(e); } } - setPhase(task, "finished"); - channel.sendResponse(TransportResponse.Empty.INSTANCE); + + @Override + public void onFailure(Throwable e) { + responseWithFailure(e); + } } } @@ -722,7 +767,7 @@ public abstract class TransportReplicationAction, Releasable { + class PrimaryShardReference implements ReplicationOperation.Primary, Releasable { private final 
IndexShard indexShard; private final Releasable operationLock; @@ -751,9 +796,9 @@ public abstract class TransportReplicationAction perform(Request request) throws Exception { - Tuple result = shardOperationOnPrimary(request); - result.v2().primaryTerm(indexShard.getPrimaryTerm()); + public PrimaryResult perform(Request request) throws Exception { + PrimaryResult result = shardOperationOnPrimary(request); + result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm()); return result; } @@ -805,20 +850,6 @@ public abstract class TransportReplicationAction, + Response extends ReplicationResponse & WriteResponse + > extends TransportReplicationAction { + + protected TransportWriteAction(Settings settings, String actionName, TransportService transportService, + ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, + String executor) { + super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + indexNameExpressionResolver, request, request, executor); + } + + /** + * Called on the primary with a reference to the {@linkplain IndexShard} to modify. + */ + protected abstract WriteResult onPrimaryShard(Request request, IndexShard indexShard) throws Exception; + + /** + * Called once per replica with a reference to the {@linkplain IndexShard} to modify. 
+ * + * @return the translog location of the {@linkplain IndexShard} after the write was completed or null if no write occurred + */ + protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard); + + @Override + protected final WritePrimaryResult shardOperationOnPrimary(Request request) throws Exception { + IndexShard indexShard = indexShard(request); + WriteResult result = onPrimaryShard(request, indexShard); + return new WritePrimaryResult(request, result.getResponse(), result.getLocation(), indexShard); + } + + @Override + protected final WriteReplicaResult shardOperationOnReplica(Request request) { + IndexShard indexShard = indexShard(request); + Translog.Location location = onReplicaShard(request, indexShard); + return new WriteReplicaResult(indexShard, request, location); + } + + /** + * Fetch the IndexShard for the request. Protected so it can be mocked in tests. + */ + protected IndexShard indexShard(Request request) { + final ShardId shardId = request.shardId(); + IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + return indexService.getShard(shardId.id()); + } + + /** + * Simple result from a write action. Write actions have static method to return these so they can integrate with bulk. + */ + public static class WriteResult { + private final Response response; + private final Translog.Location location; + + public WriteResult(Response response, @Nullable Location location) { + this.response = response; + this.location = location; + } + + public Response getResponse() { + return response; + } + + public Translog.Location getLocation() { + return location; + } + } + + /** + * Result of taking the action on the primary. 
+ */ + class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult { + boolean finishedAsyncActions; + ActionListener listener = null; + + public WritePrimaryResult(Request request, Response finalResponse, + @Nullable Translog.Location location, + IndexShard indexShard) { + super(request, finalResponse); + /* + * We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the + * refresh in parallel on the primary and on the replica. + */ + postWriteActions(indexShard, request, location, this, logger); + } + + @Override + public synchronized void respond(ActionListener listener) { + this.listener = listener; + respondIfPossible(); + } + + /** + * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. + */ + protected void respondIfPossible() { + if (finishedAsyncActions && listener != null) { + super.respond(listener); + } + } + + @Override + public synchronized void respondAfterAsyncAction(boolean forcedRefresh) { + finalResponse.setForcedRefresh(forcedRefresh); + finishedAsyncActions = true; + respondIfPossible(); + } + } + + /** + * Result of taking the action on the replica. + */ + class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult { + boolean finishedAsyncActions; + private ActionListener listener; + + public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location) { + postWriteActions(indexShard, request, location, this, logger); + } + + @Override + public void respond(ActionListener listener) { + this.listener = listener; + respondIfPossible(); + } + + /** + * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. 
+ */ + protected void respondIfPossible() { + if (finishedAsyncActions && listener != null) { + super.respond(listener); + } + } + + @Override + public synchronized void respondAfterAsyncAction(boolean forcedRefresh) { + finishedAsyncActions = true; + respondIfPossible(); + } + } + + private interface RespondingWriteResult { + void respondAfterAsyncAction(boolean forcedRefresh); + } + + static void postWriteActions(final IndexShard indexShard, + final WriteRequest request, + @Nullable final Translog.Location location, + final RespondingWriteResult respond, + final ESLogger logger) { + boolean pendingOps = false; + boolean immediateRefresh = false; + switch (request.getRefreshPolicy()) { + case IMMEDIATE: + indexShard.refresh("refresh_flag_index"); + immediateRefresh = true; + break; + case WAIT_UNTIL: + if (location != null) { + pendingOps = true; + indexShard.addRefreshListener(location, forcedRefresh -> { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + respond.respondAfterAsyncAction(forcedRefresh); + }); + } + break; + case NONE: + break; + } + boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; + if (fsyncTranslog) { + indexShard.sync(location); + } + indexShard.maybeFlush(); + if (pendingOps == false) { + respond.respondAfterAsyncAction(immediateRefresh); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 0363ef8fe43..ca55a63c1d6 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import 
org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.index.IndexRequest; @@ -187,6 +188,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio } else { update.setGetResult(null); } + update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); } @@ -219,6 +221,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio public void onResponse(IndexResponse response) { UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.isCreated()); update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); + update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); } @@ -241,11 +244,13 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio }); break; case DELETE: - deleteAction.execute(result.action(), new ActionListener() { + DeleteRequest deleteRequest = result.action(); + deleteAction.execute(deleteRequest, new ActionListener() { @Override public void onResponse(DeleteResponse response) { UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false); update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); + update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); } diff --git 
a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 9ac77050202..0c9c1c67978 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -131,7 +131,7 @@ public class UpdateHelper extends AbstractComponent { // it has to be a "create!" .create(true) .ttl(ttl) - .refresh(request.refresh()) + .setRefreshPolicy(request.getRefreshPolicy()) .routing(request.routing()) .parent(request.parent()) .consistencyLevel(request.consistencyLevel()); @@ -229,12 +229,13 @@ public class UpdateHelper extends AbstractComponent { .version(updateVersion).versionType(request.versionType()) .consistencyLevel(request.consistencyLevel()) .timestamp(timestamp).ttl(ttl) - .refresh(request.refresh()); + .setRefreshPolicy(request.getRefreshPolicy()); return new Result(indexRequest, Operation.INDEX, updatedSourceAsMap, updateSourceContentType); } else if ("delete".equals(operation)) { DeleteRequest deleteRequest = Requests.deleteRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent) .version(updateVersion).versionType(request.versionType()) - .consistencyLevel(request.consistencyLevel()); + .consistencyLevel(request.consistencyLevel()) + .setRefreshPolicy(request.getRefreshPolicy()); return new Result(deleteRequest, Operation.DELETE, updatedSourceAsMap, updateSourceContentType); } else if ("none".equals(operation)) { UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), false); diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 31f219fd4c7..e0846c1ce5d 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ 
b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; @@ -53,7 +54,8 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** */ -public class UpdateRequest extends InstanceShardOperationRequest implements DocumentRequest { +public class UpdateRequest extends InstanceShardOperationRequest + implements DocumentRequest, WriteRequest { private String type; private String id; @@ -72,7 +74,7 @@ public class UpdateRequest extends InstanceShardOperationRequest private VersionType versionType = VersionType.INTERNAL; private int retryOnConflict = 0; - private boolean refresh = false; + private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT; @@ -422,18 +424,15 @@ public class UpdateRequest extends InstanceShardOperationRequest return this.versionType; } - /** - * Should a refresh be executed post this update operation causing the operation to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. 
- */ - public UpdateRequest refresh(boolean refresh) { - this.refresh = refresh; + @Override + public UpdateRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; return this; } - public boolean refresh() { - return this.refresh; + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; } public WriteConsistencyLevel consistencyLevel() { @@ -730,7 +729,7 @@ public class UpdateRequest extends InstanceShardOperationRequest script = new Script(in); } retryOnConflict = in.readVInt(); - refresh = in.readBoolean(); + refreshPolicy = RefreshPolicy.readFrom(in); if (in.readBoolean()) { doc = new IndexRequest(); doc.readFrom(in); @@ -767,7 +766,7 @@ public class UpdateRequest extends InstanceShardOperationRequest script.writeTo(out); } out.writeVInt(retryOnConflict); - out.writeBoolean(refresh); + refreshPolicy.writeTo(out); if (doc == null) { out.writeBoolean(false); } else { diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index 30b636f4efc..403f4265fcd 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.update; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; @@ -32,9 +33,8 @@ import org.elasticsearch.script.Script; import java.util.Map; -/** - */ -public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder { +public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder + 
implements WriteRequestBuilder { public UpdateRequestBuilder(ElasticsearchClient client, UpdateAction action) { super(client, action, new UpdateRequest()); @@ -121,17 +121,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuildertrue. Defaults - * to false. - */ - public UpdateRequestBuilder setRefresh(boolean refresh) { - request.refresh(refresh); - return this; - } - /** * Sets the consistency level of write. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT} */ diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 027100b3469..b9d0c6b4c70 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -115,6 +115,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.QUERY_STRING_LENIENT_SETTING, IndexSettings.ALLOW_UNMAPPED, IndexSettings.INDEX_CHECK_ON_STARTUP, + IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING, IndexSettings.INDEX_GC_DELETES_SETTING, IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING, diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index 7c8cb4ff8c8..592c1ff1125 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -115,6 +115,11 @@ public final class IndexSettings { public static final Setting INDEX_GC_DELETES_SETTING = Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic, Property.IndexScope); + /** + * The maximum number of refresh listeners allows on this shard. 
+ */ + public static final Setting MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners", 1000, 0, + Property.Dynamic, Property.IndexScope); private final Index index; private final Version version; @@ -145,6 +150,10 @@ public final class IndexSettings { private volatile int maxResultWindow; private volatile int maxRescoreWindow; private volatile boolean TTLPurgeDisabled; + /** + * The maximum number of refresh listeners allows on this shard. + */ + private volatile int maxRefreshListeners; /** * Returns the default search field for this index. @@ -229,6 +238,7 @@ public final class IndexSettings { maxResultWindow = scopedSettings.get(MAX_RESULT_WINDOW_SETTING); maxRescoreWindow = scopedSettings.get(MAX_RESCORE_WINDOW_SETTING); TTLPurgeDisabled = scopedSettings.get(INDEX_TTL_DISABLE_PURGE_SETTING); + maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); this.mergePolicyConfig = new MergePolicyConfig(logger, this); assert indexNameMatcher.test(indexMetaData.getIndex().getName()); @@ -251,6 +261,7 @@ public final class IndexSettings { scopedSettings.addSettingsUpdateConsumer(INDEX_GC_DELETES_SETTING, this::setGCDeletes); scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, this::setTranslogFlushThresholdSize); scopedSettings.addSettingsUpdateConsumer(INDEX_REFRESH_INTERVAL_SETTING, this::setRefreshInterval); + scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners); } private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) { @@ -499,6 +510,16 @@ public final class IndexSettings { return scopedSettings.get(setting); } + /** + * The maximum number of refresh listeners allows on this shard. 
+ */ + public int getMaxRefreshListeners() { + return maxRefreshListeners; + } + + private void setMaxRefreshListeners(int maxRefreshListeners) { + this.maxRefreshListeners = maxRefreshListeners; + } IndexScopedSettings getScopedSettings() { return scopedSettings;} } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index ab142f9dd51..87ffb9331a6 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -607,6 +607,7 @@ public abstract class Engine implements Closeable { * Synchronously refreshes the engine for new search operations to reflect the latest * changes. */ + @Nullable public abstract void refresh(String source) throws EngineException; /** @@ -999,6 +1000,9 @@ public abstract class Engine implements Closeable { public static final GetResult NOT_EXISTS = new GetResult(false, Versions.NOT_FOUND, null); + /** + * Build a realtime get result from the translog. + */ public GetResult(boolean exists, long version, @Nullable Translog.Source source) { this.source = source; this.exists = exists; @@ -1007,6 +1011,9 @@ public abstract class Engine implements Closeable { this.searcher = null; } + /** + * Build a non-realtime get result from the searcher. 
+ */ public GetResult(Searcher searcher, Versions.DocIdAndVersion docIdAndVersion) { this.exists = true; this.source = null; diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 8a56feff70f..13408408e7e 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -25,14 +25,15 @@ import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.similarities.Similarity; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecService; +import org.elasticsearch.index.shard.RefreshListeners; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.store.Store; @@ -40,8 +41,6 @@ import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.threadpool.ThreadPool; -import java.util.function.Function; - /* * Holds all the configuration that is used to create an {@link Engine}. 
* Once {@link Engine} has been created with this object, changes to this @@ -66,6 +65,8 @@ public final class EngineConfig { private final Engine.EventListener eventListener; private final QueryCache queryCache; private final QueryCachingPolicy queryCachingPolicy; + @Nullable + private final RefreshListeners refreshListeners; /** * Index setting to change the low level lucene codec used for writing new segments. @@ -99,7 +100,7 @@ public final class EngineConfig { MergePolicy mergePolicy,Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.EventListener eventListener, TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, - TranslogConfig translogConfig, TimeValue flushMergesAfter) { + TranslogConfig translogConfig, TimeValue flushMergesAfter, RefreshListeners refreshListeners) { if (openMode == null) { throw new IllegalArgumentException("openMode must not be null"); } @@ -125,6 +126,7 @@ public final class EngineConfig { this.translogConfig = translogConfig; this.flushMergesAfter = flushMergesAfter; this.openMode = openMode; + this.refreshListeners = refreshListeners; } /** @@ -303,4 +305,10 @@ public final class EngineConfig { OPEN_INDEX_AND_TRANSLOG; } + /** + * {@linkplain RefreshListeners} instance to configure. 
+ */ + public RefreshListeners getRefreshListeners() { + return refreshListeners; + } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index c120b07fce8..15667e79421 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -154,6 +154,10 @@ public class InternalEngine extends Engine { this.versionMap.setManager(searcherManager); // don't allow commits until we are done with recovering allowCommits.compareAndSet(true, openMode != EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG); + if (engineConfig.getRefreshListeners() != null) { + searcherManager.addListener(engineConfig.getRefreshListeners()); + engineConfig.getRefreshListeners().setTranslog(translog); + } success = true; } finally { if (success == false) { diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index c30b2e9bf50..0a55803a5ec 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; -import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.translog.Translog; import java.io.IOException; @@ -68,6 +67,9 @@ public class ShadowEngine extends Engine { public ShadowEngine(EngineConfig engineConfig) { super(engineConfig); + if (engineConfig.getRefreshListeners() != null) { + throw new IllegalArgumentException("ShadowEngine doesn't support RefreshListeners"); + } SearcherFactory searcherFactory = new 
EngineSearcherFactory(engineConfig); final long nonexistentRetryTime = engineConfig.getIndexSettings().getSettings() .getAsTime(NONEXISTENT_INDEX_RETRY_WAIT, DEFAULT_NONEXISTENT_INDEX_RETRY_WAIT) diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 44445f0b6dd..33d1aec7ed0 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -131,6 +131,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; import java.util.function.BiConsumer; public class IndexShard extends AbstractIndexShardComponent { @@ -203,6 +204,12 @@ public class IndexShard extends AbstractIndexShardComponent { * IndexingMemoryController}). */ private final AtomicBoolean active = new AtomicBoolean(); + /** + * Allows for the registration of listeners that are called when a change becomes visible for search. This is nullable because + * {@linkplain ShadowIndexShard} doesn't support this. 
+ */ + @Nullable + private final RefreshListeners refreshListeners; public IndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, @@ -255,6 +262,7 @@ public class IndexShard extends AbstractIndexShardComponent { suspendableRefContainer = new SuspendableRefContainer(); searcherWrapper = indexSearcherWrapper; primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id()); + refreshListeners = buildRefreshListeners(); persistMetadata(shardRouting, null); } @@ -579,6 +587,7 @@ public class IndexShard extends AbstractIndexShardComponent { */ public void refresh(String source) { verifyNotClosed(); + if (canIndex()) { long bytes = getEngine().getIndexBufferRAMBytesUsed(); writingBytes.addAndGet(bytes); @@ -1530,7 +1539,7 @@ public class IndexShard extends AbstractIndexShardComponent { return new EngineConfig(openMode, shardId, threadPool, indexSettings, warmer, store, deletionPolicy, indexSettings.getMergePolicy(), mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig, - IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings())); + IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), refreshListeners); } public Releasable acquirePrimaryOperationLock() { @@ -1626,6 +1635,17 @@ public class IndexShard extends AbstractIndexShardComponent { return false; } + /** + * Build {@linkplain RefreshListeners} for this shard. Protected so {@linkplain ShadowIndexShard} can override it to return null. 
+ */ + protected RefreshListeners buildRefreshListeners() { + return new RefreshListeners( + indexSettings::getMaxRefreshListeners, + () -> refresh("too_many_listeners"), + threadPool.executor(ThreadPool.Names.LISTENER)::execute, + logger); + } + /** * Simple struct encapsulating a shard failure * @@ -1651,14 +1671,26 @@ public class IndexShard extends AbstractIndexShardComponent { } /** - * Returns true iff one or more changes to the engine are not visible to via the current searcher. + * Returns true iff one or more changes to the engine are not visible to via the current searcher *or* there are pending + * refresh listeners. * Otherwise false. * * @throws EngineClosedException if the engine is already closed * @throws AlreadyClosedException if the internal indexwriter in the engine is already closed */ public boolean isRefreshNeeded() { - return getEngine().refreshNeeded(); + return getEngine().refreshNeeded() || (refreshListeners != null && refreshListeners.refreshNeeded()); + } + + /** + * Add a listener for refreshes. + * + * @param location the location to listen for + * @param listener for the refresh. Called with true if registering the listener ran it out of slots and forced a refresh. Called with + * false otherwise. + */ + public void addRefreshListener(Translog.Location location, Consumer listener) { + refreshListeners.addOrNotify(location, listener); } private class IndexShardRecoveryPerformer extends TranslogRecoveryPerformer { diff --git a/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java new file mode 100644 index 00000000000..ab3e334714a --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -0,0 +1,208 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +import org.apache.lucene.search.ReferenceManager; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.index.translog.Translog; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Executor; +import java.util.function.Consumer; +import java.util.function.IntSupplier; + +import static java.util.Objects.requireNonNull; + +/** + * Allows for the registration of listeners that are called when a change becomes visible for search. This functionality is exposed from + * {@link IndexShard} but kept here so it can be tested without standing up the entire thing. + */ +public final class RefreshListeners implements ReferenceManager.RefreshListener { + private final IntSupplier getMaxRefreshListeners; + private final Runnable forceRefresh; + private final Executor listenerExecutor; + private final ESLogger logger; + + /** + * List of refresh listeners. Defaults to null and built on demand because most refresh cycles won't need it. Entries are never removed + * from it, rather, it is nulled and rebuilt when needed again. The (hopefully) rare entries that didn't make the current refresh cycle + * are just added back to the new list. 
Both the reference and the contents are always modified while synchronized on {@code this}. + */ + private volatile List>> refreshListeners = null; + /** + * The translog location that was last made visible by a refresh. + */ + private volatile Translog.Location lastRefreshedLocation; + + public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Executor listenerExecutor, ESLogger logger) { + this.getMaxRefreshListeners = getMaxRefreshListeners; + this.forceRefresh = forceRefresh; + this.listenerExecutor = listenerExecutor; + this.logger = logger; + } + + /** + * Add a listener for refreshes, calling it immediately if the location is already visible. If this runs out of listener slots then it + * forces a refresh and calls the listener immediately as well. + * + * @param location the location to listen for + * @param listener for the refresh. Called with true if registering the listener ran it out of slots and forced a refresh. Called with + * false otherwise. + */ + public void addOrNotify(Translog.Location location, Consumer listener) { + requireNonNull(listener, "listener cannot be null"); + requireNonNull(location, "location cannot be null"); + + if (lastRefreshedLocation != null && lastRefreshedLocation.compareTo(location) >= 0) { + // Location already visible, just call the listener + listener.accept(false); + return; + } + synchronized (this) { + if (refreshListeners == null) { + refreshListeners = new ArrayList<>(); + } + if (refreshListeners.size() < getMaxRefreshListeners.getAsInt()) { + // We have a free slot so register the listener + refreshListeners.add(new Tuple<>(location, listener)); + return; + } + } + // No free slot so force a refresh and call the listener in this thread + forceRefresh.run(); + listener.accept(true); + } + + /** + * Returns true if there are pending listeners. 
+ */ + public boolean refreshNeeded() { + // No need to synchronize here because we're doing a single volatile read + return refreshListeners != null; + } + + /** + * Setup the translog used to find the last refreshed location. + */ + public void setTranslog(Translog translog) { + this.translog = translog; + } + + // Implementation of ReferenceManager.RefreshListener that adapts Lucene's RefreshListener into Elasticsearch's refresh listeners. + private Translog translog; + /** + * Snapshot of the translog location before the current refresh if there is a refresh going on or null. Doesn't have to be volatile + * because when it is used by the refreshing thread. + */ + private Translog.Location currentRefreshLocation; + + @Override + public void beforeRefresh() throws IOException { + currentRefreshLocation = translog.getLastWriteLocation(); + } + + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + /* + * We intentionally ignore didRefresh here because our timing is a little off. It'd be a useful flag if we knew everything that made + * it into the refresh, but the way we snapshot the translog position before the refresh, things can sneak into the refresh that we + * don't know about. + */ + if (null == currentRefreshLocation) { + /* + * The translog had an empty last write location at the start of the refresh so we can't alert anyone to anything. This + * usually happens during recovery. The next refresh cycle out to pick up this refresh. + */ + return; + } + // First check if we've actually moved forward. If not then just bail immediately. 
+ assert lastRefreshedLocation == null || currentRefreshLocation.compareTo(lastRefreshedLocation) >= 0; + if (lastRefreshedLocation != null && currentRefreshLocation.compareTo(lastRefreshedLocation) == 0) { + return; + } + /* + * Set the lastRefreshedLocation so listeners that come in for locations before that will just execute inline without messing + * around with refreshListeners or synchronizing at all. + */ + lastRefreshedLocation = currentRefreshLocation; + /* + * Grab the current refresh listeners and replace them with null while synchronized. Any listeners that come in after this won't be + * in the list we iterate over and very likely won't be candidates for refresh anyway because we've already moved the + * lastRefreshedLocation. + */ + List>> candidates; + synchronized (this) { + candidates = refreshListeners; + // No listeners to check so just bail early + if (candidates == null) { + return; + } + refreshListeners = null; + } + // Iterate the list of listeners, copying the listeners to fire to one list and those to preserve to another list. + List> listenersToFire = null; + List>> preservedListeners = null; + for (Tuple> tuple : candidates) { + Translog.Location location = tuple.v1(); + Consumer listener = tuple.v2(); + if (location.compareTo(currentRefreshLocation) <= 0) { + if (listenersToFire == null) { + listenersToFire = new ArrayList<>(); + } + listenersToFire.add(listener); + } else { + if (preservedListeners == null) { + preservedListeners = new ArrayList<>(); + } + preservedListeners.add(tuple); + } + } + /* + * Now add any preserved listeners back to the running list of refresh listeners while under lock. We'll try them next time. While + * we were iterating the list of listeners new listeners could have come in. That means that adding all of our preserved listeners + * might push our list of listeners above the maximum number of slots allowed. This seems unlikely because we expect few listeners + * to be preserved. 
And the next listener while we're full will trigger a refresh anyway. + */ + if (preservedListeners != null) { + synchronized (this) { + if (refreshListeners == null) { + refreshListeners = new ArrayList<>(); + } + refreshListeners.addAll(preservedListeners); + } + } + // Lastly, fire the listeners that are ready on the listener thread pool + if (listenersToFire != null) { + final List> finalListenersToFire = listenersToFire; + listenerExecutor.execute(() -> { + for (Consumer listener : finalListenersToFire) { + try { + listener.accept(false); + } catch (Throwable t) { + logger.warn("Error firing refresh listener", t); + } + } + }); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index bf35d02fea2..e22f684637e 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -31,12 +31,14 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.function.Consumer; /** * ShadowIndexShard extends {@link IndexShard} to add file synchronization @@ -86,6 +88,12 @@ public final class ShadowIndexShard extends IndexShard { return engineFactory.newReadOnlyEngine(config); } + @Override + protected RefreshListeners buildRefreshListeners() { + // ShadowEngine doesn't have a translog so it shouldn't try to support RefreshListeners. 
+ return null; + } + @Override public boolean shouldFlush() { // we don't need to flush since we don't write - all dominated by the primary @@ -96,4 +104,9 @@ public final class ShadowIndexShard extends IndexShard { public TranslogStats translogStats() { return null; // shadow engine has no translog } + + @Override + public void addRefreshListener(Translog.Location location, Consumer listener) { + throw new UnsupportedOperationException("Can't listen for a refresh on a shadow engine because it doesn't have a translog"); + } } diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index b66c82d4083..57847972e42 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -447,6 +447,21 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } + /** + * The a {@linkplain Location} that will sort after the {@linkplain Location} returned by the last write but before any locations which + * can be returned by the next write. + */ + public Location getLastWriteLocation() { + try (ReleasableLock lock = readLock.acquire()) { + /* + * We use position = current - 1 and size = Integer.MAX_VALUE here instead of position current and size = 0 for two reasons: + * 1. Translog.Location's compareTo doesn't actually pay attention to size even though it's equals method does. + * 2. It feels more right to return a *position* that is before the next write's position rather than rely on the size. 
+ */ + return new Location(current.generation, current.sizeInBytes() - 1, Integer.MAX_VALUE); + } + } + boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException { // tests can override this ByteBuffer buffer = ByteBuffer.allocate(location.size); diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 0109995f80f..b2c0cc88cf9 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -26,6 +26,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Channels; +import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.shard.ShardId; diff --git a/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java b/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java index 620418eb087..d9dbb21e804 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java @@ -84,7 +84,7 @@ public class RestBulkAction extends BaseRestHandler { bulkRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel)); } bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT)); - bulkRequest.refresh(request.paramAsBoolean("refresh", bulkRequest.refresh())); + bulkRequest.setRefreshPolicy(request.param("refresh")); bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, defaultPipeline, null, allowExplicitIndex); client.bulk(bulkRequest, new RestBuilderListener(channel) { diff --git 
a/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java b/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java index 8e3449344c4..29316893504 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java @@ -51,7 +51,7 @@ public class RestDeleteAction extends BaseRestHandler { deleteRequest.routing(request.param("routing")); deleteRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing deleteRequest.timeout(request.paramAsTime("timeout", DeleteRequest.DEFAULT_TIMEOUT)); - deleteRequest.refresh(request.paramAsBoolean("refresh", deleteRequest.refresh())); + deleteRequest.setRefreshPolicy(request.param("refresh")); deleteRequest.version(RestActions.parseVersion(request)); deleteRequest.versionType(VersionType.fromString(request.param("version_type"), deleteRequest.versionType())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java index 26dd1eca78d..f807e68088a 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java @@ -80,7 +80,7 @@ public class RestIndexAction extends BaseRestHandler { indexRequest.setPipeline(request.param("pipeline")); indexRequest.source(request.content()); indexRequest.timeout(request.paramAsTime("timeout", IndexRequest.DEFAULT_TIMEOUT)); - indexRequest.refresh(request.paramAsBoolean("refresh", indexRequest.refresh())); + indexRequest.setRefreshPolicy(request.param("refresh")); indexRequest.version(RestActions.parseVersion(request)); indexRequest.versionType(VersionType.fromString(request.param("version_type"), indexRequest.versionType())); String sOpType = request.param("op_type"); diff --git 
a/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java b/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java index 88f90374523..bdea4e33e6d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java @@ -58,7 +58,7 @@ public class RestUpdateAction extends BaseRestHandler { updateRequest.routing(request.param("routing")); updateRequest.parent(request.param("parent")); updateRequest.timeout(request.paramAsTime("timeout", updateRequest.timeout())); - updateRequest.refresh(request.paramAsBoolean("refresh", updateRequest.refresh())); + updateRequest.setRefreshPolicy(request.param("refresh")); String consistencyLevel = request.param("consistency"); if (consistencyLevel != null) { updateRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel)); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index f2afc1e7f6e..337f881d41b 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.Requests; import org.elasticsearch.common.Strings; @@ -180,22 +181,22 @@ public class BulkRequestTests extends ESTestCase { public void testBulkRequestWithRefresh() throws Exception { BulkRequest bulkRequest = new BulkRequest(); // We force here a "id is missing" validation error - bulkRequest.add(new DeleteRequest("index", 
"type", null).refresh(true)); + bulkRequest.add(new DeleteRequest("index", "type", null).setRefreshPolicy(RefreshPolicy.IMMEDIATE)); // We force here a "type is missing" validation error bulkRequest.add(new DeleteRequest("index", null, "id")); - bulkRequest.add(new DeleteRequest("index", "type", "id").refresh(true)); - bulkRequest.add(new UpdateRequest("index", "type", "id").doc("{}").refresh(true)); - bulkRequest.add(new IndexRequest("index", "type", "id").source("{}").refresh(true)); + bulkRequest.add(new DeleteRequest("index", "type", "id").setRefreshPolicy(RefreshPolicy.IMMEDIATE)); + bulkRequest.add(new UpdateRequest("index", "type", "id").doc("{}").setRefreshPolicy(RefreshPolicy.IMMEDIATE)); + bulkRequest.add(new IndexRequest("index", "type", "id").source("{}").setRefreshPolicy(RefreshPolicy.IMMEDIATE)); ActionRequestValidationException validate = bulkRequest.validate(); assertThat(validate, notNullValue()); assertThat(validate.validationErrors(), not(empty())); assertThat(validate.validationErrors(), contains( - "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", + "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", "id is missing", "type is missing", - "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", - "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", - "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.")); + "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", + "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", + "RefreshPolicy is not supported on an item request. 
Set it on the BulkRequest instead.")); } // issue 15120 diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkShardRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkShardRequestTests.java index ff1a24d6900..b26d2531ff0 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkShardRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkShardRequestTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.bulk; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; @@ -28,9 +29,11 @@ public class BulkShardRequestTests extends ESTestCase { public void testToString() { String index = randomSimpleString(random(), 10); int count = between(1, 100); - BulkShardRequest r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), false, new BulkItemRequest[count]); + BulkShardRequest r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), RefreshPolicy.NONE, new BulkItemRequest[count]); assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests", r.toString()); - r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), true, new BulkItemRequest[count]); + r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), RefreshPolicy.IMMEDIATE, new BulkItemRequest[count]); assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests and a refresh", r.toString()); + r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), RefreshPolicy.WAIT_UNTIL, new BulkItemRequest[count]); + assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests blocking until refresh", r.toString()); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index 
6175f822b6a..ebaa5b5c01e 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.admin.indices.flush.FlushRequest; diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index cc7558d1de8..55e2a9d3cf2 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -21,16 +21,15 @@ package org.elasticsearch.action.support.replication; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; -import 
org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; @@ -102,7 +101,7 @@ public class ReplicationOperationTests extends ESTestCase { } Request request = new Request(shardId); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); final ClusterState finalState = state; final TestReplicaProxy replicasProxy = new TestReplicaProxy(expectedFailures); final TestReplicationOperation op = new TestReplicationOperation(request, @@ -114,7 +113,7 @@ public class ReplicationOperationTests extends ESTestCase { assertThat(request.processedOnReplicas, equalTo(expectedReplicas)); assertThat(replicasProxy.failedReplicas, equalTo(expectedFailedShards)); assertTrue("listener is not marked as done", listener.isDone()); - Response.ShardInfo shardInfo = listener.actionGet().getShardInfo(); + ShardInfo shardInfo = listener.actionGet().getShardInfo(); assertThat(shardInfo.getFailed(), equalTo(expectedFailedShards.size())); assertThat(shardInfo.getFailures(), arrayWithSize(expectedFailedShards.size())); assertThat(shardInfo.getSuccessful(), equalTo(1 + expectedReplicas.size() - expectedFailures.size())); @@ -135,7 +134,7 @@ public class ReplicationOperationTests extends ESTestCase { final ShardRouting primaryShard = indexShardRoutingTable.primaryShard(); Request request = new Request(shardId); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); final TestReplicationOperation op = new TestReplicationOperation(request, new TestPrimary(primaryShard, primaryTerm), listener, false, false, new TestReplicaProxy(), () -> state, logger, "test"); @@ -143,7 +142,7 @@ public class ReplicationOperationTests extends ESTestCase { assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); 
assertThat(request.processedOnReplicas, equalTo(Collections.emptySet())); assertTrue("listener is not marked as done", listener.isDone()); - Response.ShardInfo shardInfo = listener.actionGet().getShardInfo(); + ShardInfo shardInfo = listener.actionGet().getShardInfo(); assertThat(shardInfo.getFailed(), equalTo(0)); assertThat(shardInfo.getFailures(), arrayWithSize(0)); assertThat(shardInfo.getSuccessful(), equalTo(1)); @@ -172,7 +171,7 @@ public class ReplicationOperationTests extends ESTestCase { expectedFailures.put(failedReplica, new CorruptIndexException("simulated", (String) null)); Request request = new Request(shardId); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); final ClusterState finalState = state; final TestReplicaProxy replicasProxy = new TestReplicaProxy(expectedFailures) { @Override @@ -233,16 +232,16 @@ public class ReplicationOperationTests extends ESTestCase { final ShardRouting primaryShard = state.get().routingTable().shardRoutingTable(shardId).primaryShard(); final TestPrimary primary = new TestPrimary(primaryShard, primaryTerm) { @Override - public Tuple perform(Request request) throws Exception { - final Tuple tuple = super.perform(request); + public Result perform(Request request) throws Exception { + Result result = super.perform(request); state.set(changedState); logger.debug("--> state after primary operation:\n{}", state.get().prettyPrint()); - return tuple; + return result; } }; Request request = new Request(shardId); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, new TestReplicaProxy(), state::get); op.execute(); @@ -296,7 +295,7 @@ public class ReplicationOperationTests extends ESTestCase { state.prettyPrint()); final long primaryTerm = state.metaData().index(index).primaryTerm(shardId.id()); final 
IndexShardRoutingTable shardRoutingTable = state.routingTable().index(index).shard(shardId.id()); - PlainActionFuture listener = new PlainActionFuture<>(); + PlainActionFuture listener = new PlainActionFuture<>(); final ShardRouting primaryShard = shardRoutingTable.primaryShard(); final TestReplicationOperation op = new TestReplicationOperation(request, new TestPrimary(primaryShard, primaryTerm), @@ -362,10 +361,7 @@ public class ReplicationOperationTests extends ESTestCase { } } - static class Response extends ReplicationResponse { - } - - static class TestPrimary implements ReplicationOperation.Primary { + static class TestPrimary implements ReplicationOperation.Primary { final ShardRouting routing; final long term; @@ -385,12 +381,35 @@ public class ReplicationOperationTests extends ESTestCase { } @Override - public Tuple perform(Request request) throws Exception { + public Result perform(Request request) throws Exception { if (request.processedOnPrimary.compareAndSet(false, true) == false) { fail("processed [" + request + "] twice"); } request.primaryTerm(term); - return new Tuple<>(new Response(), request); + return new Result(request); + } + + static class Result implements ReplicationOperation.PrimaryResult { + private final Request request; + private ShardInfo shardInfo; + + public Result(Request request) { + this.request = request; + } + + @Override + public Request replicaRequest() { + return request; + } + + @Override + public void setShardInfo(ShardInfo shardInfo) { + this.shardInfo = shardInfo; + } + + public ShardInfo getShardInfo() { + return shardInfo; + } } } @@ -436,15 +455,15 @@ public class ReplicationOperationTests extends ESTestCase { } } - class TestReplicationOperation extends ReplicationOperation { - public TestReplicationOperation(Request request, Primary primary, ActionListener listener, - Replicas replicas, Supplier clusterStateSupplier) { + class TestReplicationOperation extends ReplicationOperation { + public 
TestReplicationOperation(Request request, Primary primary, + ActionListener listener, Replicas replicas, Supplier clusterStateSupplier) { this(request, primary, listener, true, false, replicas, clusterStateSupplier, ReplicationOperationTests.this.logger, "test"); } - public TestReplicationOperation(Request request, Primary primary, ActionListener listener, - boolean executeOnReplicas, boolean checkWriteConsistency, Replicas replicas, - Supplier clusterStateSupplier, ESLogger logger, String opType) { + public TestReplicationOperation(Request request, Primary primary, + ActionListener listener, boolean executeOnReplicas, boolean checkWriteConsistency, + Replicas replicas, Supplier clusterStateSupplier, ESLogger logger, String opType) { super(request, primary, listener, executeOnReplicas, checkWriteConsistency, replicas, clusterStateSupplier, logger, opType); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 2e81ec712eb..afa72ec7526 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; @@ -43,7 +42,6 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; 
-import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; @@ -155,7 +153,7 @@ public class TransportReplicationActionTests extends ESTestCase { ClusterBlocks.Builder block = ClusterBlocks.builder() .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); - TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); + Action.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); reroutePhase.run(); assertListenerThrows("primary phase should fail operation", listener, ClusterBlockException.class); assertPhase(task, "failed"); @@ -199,7 +197,7 @@ public class TransportReplicationActionTests extends ESTestCase { Request request = new Request(shardId).timeout("1ms"); PlainActionFuture listener = new PlainActionFuture<>(); - TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); + Action.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); reroutePhase.run(); assertListenerThrows("unassigned primary didn't cause a timeout", listener, UnavailableShardsException.class); assertPhase(task, "failed"); @@ -245,7 +243,7 @@ public class TransportReplicationActionTests extends ESTestCase { Request request = new Request(shardId).timeout("1ms").routedBasedOnClusterVersion(clusterService.state().version() + 1); PlainActionFuture listener = new PlainActionFuture<>(); - TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener); + Action.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener); reroutePhase.run(); assertListenerThrows("cluster state too old didn't cause a timeout", 
listener, UnavailableShardsException.class); @@ -285,7 +283,7 @@ public class TransportReplicationActionTests extends ESTestCase { PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); - TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); + Action.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); reroutePhase.run(); assertListenerThrows("must throw index not found exception", listener, IndexNotFoundException.class); assertPhase(task, "failed"); @@ -312,7 +310,7 @@ public class TransportReplicationActionTests extends ESTestCase { PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); - TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); + Action.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); reroutePhase.run(); CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests, arrayWithSize(1)); @@ -364,7 +362,7 @@ public class TransportReplicationActionTests extends ESTestCase { Request request = new Request(shardId); PlainActionFuture listener = new PlainActionFuture<>(); - TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); + Action.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); reroutePhase.run(); assertThat(request.shardId(), equalTo(shardId)); logger.info("--> primary is assigned to [{}], checking request forwarded", primaryNodeId); @@ -393,9 +391,9 @@ public class TransportReplicationActionTests extends ESTestCase { AtomicBoolean executed = new AtomicBoolean(); Action.PrimaryOperationTransportHandler primaryPhase = action.new PrimaryOperationTransportHandler() { @Override - protected ReplicationOperation createReplicatedOperation(Request request, ActionListener actionListener, - 
Action.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + protected ReplicationOperation createReplicatedOperation(Request request, + ActionListener actionListener, Action.PrimaryShardReference primaryShardReference, + boolean executeOnReplicas) { return new NoopReplicationOperation(request, actionListener) { public void execute() throws Exception { assertPhase(task, "primary"); @@ -448,9 +446,9 @@ public class TransportReplicationActionTests extends ESTestCase { AtomicBoolean executed = new AtomicBoolean(); Action.PrimaryOperationTransportHandler primaryPhase = action.new PrimaryOperationTransportHandler() { @Override - protected ReplicationOperation createReplicatedOperation(Request request, ActionListener actionListener, - Action.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + protected ReplicationOperation createReplicatedOperation(Request request, + ActionListener actionListener, Action.PrimaryShardReference primaryShardReference, + boolean executeOnReplicas) { return new NoopReplicationOperation(request, actionListener) { public void execute() throws Exception { assertPhase(task, "primary"); @@ -478,9 +476,9 @@ public class TransportReplicationActionTests extends ESTestCase { }; Action.PrimaryShardReference primary = action.new PrimaryShardReference(shard, releasable); final Request request = new Request(); - Tuple result = primary.perform(request); + Request replicaRequest = primary.perform(request).replicaRequest; - assertThat(result.v2().primaryTerm(), equalTo(primaryTerm)); + assertThat(replicaRequest.primaryTerm(), equalTo(primaryTerm)); final ElasticsearchException exception = new ElasticsearchException("testing"); primary.failShard("test", exception); @@ -582,9 +580,9 @@ public class TransportReplicationActionTests extends ESTestCase { setState(clusterService, state); Action.PrimaryOperationTransportHandler primaryPhase = action.new PrimaryOperationTransportHandler() { @Override - protected 
ReplicationOperation createReplicatedOperation(Request request, ActionListener actionListener, - Action.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + protected ReplicationOperation createReplicatedOperation(Request request, + ActionListener actionListener, Action.PrimaryShardReference primaryShardReference, + boolean executeOnReplicas) { assertFalse(executeOnReplicas); return new NoopReplicationOperation(request, actionListener); } @@ -608,9 +606,9 @@ public class TransportReplicationActionTests extends ESTestCase { Action.PrimaryOperationTransportHandler primaryPhase = action.new PrimaryOperationTransportHandler() { @Override - protected ReplicationOperation createReplicatedOperation(Request request, ActionListener listener, - Action.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + protected ReplicationOperation createReplicatedOperation(Request request, + ActionListener listener, Action.PrimaryShardReference primaryShardReference, + boolean executeOnReplicas) { assertIndexShardCounter(1); if (throwExceptionOnCreation) { throw new ElasticsearchException("simulated exception, during createReplicatedOperation"); @@ -623,7 +621,7 @@ public class TransportReplicationActionTests extends ESTestCase { if (throwExceptionOnRun) { throw new ElasticsearchException("simulated exception, during performOnPrimary"); } else if (respondWithError) { - this.finalResponseListener.onFailure(new ElasticsearchException("simulated exception, as a response")); + this.resultListener.onFailure(new ElasticsearchException("simulated exception, as a response")); } else { super.execute(); } @@ -667,13 +665,13 @@ public class TransportReplicationActionTests extends ESTestCase { final ReplicationTask task = maybeTask(); Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool) { @Override - protected void shardOperationOnReplica(Request request) { + protected ReplicaResult 
shardOperationOnReplica(Request request) { assertIndexShardCounter(1); assertPhase(task, "replica"); if (throwException) { throw new ElasticsearchException("simulated"); } - super.shardOperationOnReplica(request); + return new ReplicaResult(); } }; final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler(); @@ -765,15 +763,16 @@ public class TransportReplicationActionTests extends ESTestCase { } @Override - protected Tuple shardOperationOnPrimary(Request shardRequest) throws Exception { + protected PrimaryResult shardOperationOnPrimary(Request shardRequest) throws Exception { boolean executedBefore = shardRequest.processedOnPrimary.getAndSet(true); assert executedBefore == false : "request has already been executed on the primary"; - return new Tuple<>(new Response(), shardRequest); + return new PrimaryResult(shardRequest, new Response()); } @Override - protected void shardOperationOnReplica(Request request) { + protected ReplicaResult shardOperationOnReplica(Request request) { request.processedOnReplicas.incrementAndGet(); + return new ReplicaResult(); } @Override @@ -822,15 +821,14 @@ public class TransportReplicationActionTests extends ESTestCase { } } - class NoopReplicationOperation extends ReplicationOperation { - - public NoopReplicationOperation(Request request, ActionListener listener) { + class NoopReplicationOperation extends ReplicationOperation { + public NoopReplicationOperation(Request request, ActionListener listener) { super(request, null, listener, true, true, null, null, TransportReplicationActionTests.this.logger, "noop"); } @Override public void execute() throws Exception { - this.finalResponseListener.onResponse(new Response()); + this.resultListener.onResponse(action.new PrimaryResult(null, new Response())); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java 
b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java new file mode 100644 index 00000000000..7b312959631 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -0,0 +1,190 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.support.WriteResponse; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.Translog.Location; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.util.HashSet; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + +public class TransportWriteActionTests extends ESTestCase { + private IndexShard indexShard; + private Translog.Location location; + + @Before + public void initCommonMocks() { + indexShard = mock(IndexShard.class); + location = mock(Translog.Location.class); + } + + public void testPrimaryNoRefreshCall() throws Exception { + noRefreshCall(TestAction::shardOperationOnPrimary, TestAction.WritePrimaryResult::respond); + } + + public void testReplicaNoRefreshCall() throws Exception { + noRefreshCall(TestAction::shardOperationOnReplica, TestAction.WriteReplicaResult::respond); + } + + private void noRefreshCall(ThrowingBiFunction action, + BiConsumer> responder) + throws Exception { + TestRequest request = new TestRequest(); + request.setRefreshPolicy(RefreshPolicy.NONE); // The default, but we'll set it anyway just to be explicit + Result result = action.apply(new TestAction(), 
request); + CapturingActionListener listener = new CapturingActionListener<>(); + responder.accept(result, listener); + assertNotNull(listener.response); + verify(indexShard, never()).refresh(any()); + verify(indexShard, never()).addRefreshListener(any(), any()); + } + + public void testPrimaryImmediateRefresh() throws Exception { + immediateRefresh(TestAction::shardOperationOnPrimary, TestAction.WritePrimaryResult::respond, r -> assertTrue(r.forcedRefresh)); + } + + public void testReplicaImmediateRefresh() throws Exception { + immediateRefresh(TestAction::shardOperationOnReplica, TestAction.WriteReplicaResult::respond, r -> {}); + } + + private void immediateRefresh(ThrowingBiFunction action, + BiConsumer> responder, + Consumer responseChecker) throws Exception { + TestRequest request = new TestRequest(); + request.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + Result result = action.apply(new TestAction(), request); + CapturingActionListener listener = new CapturingActionListener<>(); + responder.accept(result, listener); + assertNotNull(listener.response); + responseChecker.accept(listener.response); + verify(indexShard).refresh("refresh_flag_index"); + verify(indexShard, never()).addRefreshListener(any(), any()); + } + + public void testPrimaryWaitForRefresh() throws Exception { + waitForRefresh(TestAction::shardOperationOnPrimary, TestAction.WritePrimaryResult::respond, + (r, forcedRefresh) -> assertEquals(forcedRefresh, r.forcedRefresh)); + } + + public void testReplicaWaitForRefresh() throws Exception { + waitForRefresh(TestAction::shardOperationOnReplica, TestAction.WriteReplicaResult::respond, (r, forcedRefresh) -> {}); + } + + private void waitForRefresh(ThrowingBiFunction action, + BiConsumer> responder, + BiConsumer resultChecker) throws Exception { + TestRequest request = new TestRequest(); + request.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); + Result result = action.apply(new TestAction(), request); + CapturingActionListener listener = new 
CapturingActionListener<>(); + responder.accept(result, listener); + assertNull(listener.response); // Haven't reallresponded yet + + @SuppressWarnings({ "unchecked", "rawtypes" }) + ArgumentCaptor> refreshListener = ArgumentCaptor.forClass((Class) Consumer.class); + verify(indexShard, never()).refresh(any()); + verify(indexShard).addRefreshListener(any(), refreshListener.capture()); + + // Now we can fire the listener manually and we'll get a response + boolean forcedRefresh = randomBoolean(); + refreshListener.getValue().accept(forcedRefresh); + assertNotNull(listener.response); + resultChecker.accept(listener.response, forcedRefresh); + } + + private class TestAction extends TransportWriteAction { + protected TestAction() { + super(Settings.EMPTY, "test", mock(TransportService.class), null, null, null, null, new ActionFilters(new HashSet<>()), + new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, ThreadPool.Names.SAME); + } + + @Override + protected IndexShard indexShard(TestRequest request) { + return indexShard; + } + + @Override + protected WriteResult onPrimaryShard(TestRequest request, IndexShard indexShard) throws Exception { + return new WriteResult<>(new TestResponse(), location); + } + + @Override + protected Location onReplicaShard(TestRequest request, IndexShard indexShard) { + return location; + } + + @Override + protected TestResponse newResponseInstance() { + return new TestResponse(); + } + } + + private static class TestRequest extends ReplicatedWriteRequest { + public TestRequest() { + setShardId(new ShardId("test", "test", 1)); + } + } + + private static class TestResponse extends ReplicationResponse implements WriteResponse { + boolean forcedRefresh; + + @Override + public void setForcedRefresh(boolean forcedRefresh) { + this.forcedRefresh = forcedRefresh; + } + } + + private static class CapturingActionListener implements ActionListener { + private R response; + + @Override + public void onResponse(R response) { + this.response 
= response; + } + + @Override + public void onFailure(Throwable e) { + throw new RuntimeException(e); + } + } + + private interface ThrowingBiFunction { + R apply(A a, B b) throws Exception; + } +} diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java index 08f8970f1e4..2a16625d037 100644 --- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.cluster.metadata.AliasMetaData; @@ -175,10 +176,15 @@ public class IndexAliasesIT extends ESIntegTestCase { assertAcked(admin().indices().prepareAliases().addAlias("test", "tests", termQuery("name", "test"))); logger.info("--> indexing against [test]"); - client().index(indexRequest("test").type("type1").id("1").source(source("1", "foo test")).refresh(true)).actionGet(); - client().index(indexRequest("test").type("type1").id("2").source(source("2", "bar test")).refresh(true)).actionGet(); - client().index(indexRequest("test").type("type1").id("3").source(source("3", "baz test")).refresh(true)).actionGet(); - client().index(indexRequest("test").type("type1").id("4").source(source("4", "something else")).refresh(true)).actionGet(); + client().index(indexRequest("test").type("type1").id("1").source(source("1", "foo test")).setRefreshPolicy(RefreshPolicy.IMMEDIATE)) + .actionGet(); + client().index(indexRequest("test").type("type1").id("2").source(source("2", "bar 
test")).setRefreshPolicy(RefreshPolicy.IMMEDIATE)) + .actionGet(); + client().index(indexRequest("test").type("type1").id("3").source(source("3", "baz test")).setRefreshPolicy(RefreshPolicy.IMMEDIATE)) + .actionGet(); + client().index( + indexRequest("test").type("type1").id("4").source(source("4", "something else")).setRefreshPolicy(RefreshPolicy.IMMEDIATE)) + .actionGet(); logger.info("--> checking single filtering alias search"); SearchResponse searchResponse = client().prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get(); diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 0d43580c9bc..c86535e40c5 100644 --- a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -22,6 +22,7 @@ package org.elasticsearch.cluster.allocation; import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -225,7 +226,7 @@ public class ClusterRerouteIT extends ESIntegTestCase { assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.STARTED)); - client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).execute().actionGet(); + client().prepareIndex("test", "type", "1").setSource("field", "value").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); final Index index = resolveIndex("test"); logger.info("--> 
closing all nodes"); diff --git a/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java b/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java index 4f28cf19d7b..765cee3b6e8 100644 --- a/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java +++ b/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.document; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -27,6 +26,7 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/core/src/test/java/org/elasticsearch/index/WaitUntilRefreshIT.java b/core/src/test/java/org/elasticsearch/index/WaitUntilRefreshIT.java new file mode 100644 index 00000000000..b2cb2d96818 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/WaitUntilRefreshIT.java @@ -0,0 +1,217 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index; + +import org.elasticsearch.action.ListenableActionFuture; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.NativeScriptFactory; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.test.ESIntegTestCase; +import org.junit.Before; + +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; +import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; + +/** + * Tests that requests with RefreshPolicy.WAIT_UNTIL will be visible when they return. + */ +public class WaitUntilRefreshIT extends ESIntegTestCase { + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(NetworkModule.HTTP_ENABLED.getKey(), true).build(); + } + + @Override + public Settings indexSettings() { + // Use a shorter refresh interval to speed up the tests. We'll be waiting on this interval several times. + return Settings.builder().put(super.indexSettings()).put("index.refresh_interval", "40ms").build(); + } + + @Before + public void createTestIndex() { + createIndex("test"); + } + + public void testIndex() { + IndexResponse index = client().prepareIndex("test", "index", "1").setSource("foo", "bar").setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) + .get(); + assertEquals(RestStatus.CREATED, index.status()); + assertFalse("request shouldn't have forced a refresh", index.forcedRefresh()); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); + } + + public void testDelete() throws InterruptedException, ExecutionException { + // Index normally + indexRandom(true, client().prepareIndex("test", "test", "1").setSource("foo", "bar")); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); + + // Now delete with blockUntilRefresh + DeleteResponse delete = client().prepareDelete("test", "test", "1").setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).get(); + assertTrue("document was deleted", delete.isFound()); + assertFalse("request shouldn't have forced a refresh", delete.forcedRefresh()); + assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); + } + + public void testUpdate() throws InterruptedException, ExecutionException { + // Index normally + indexRandom(true, 
client().prepareIndex("test", "test", "1").setSource("foo", "bar")); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); + + // Update with RefreshPolicy.WAIT_UNTIL + UpdateResponse update = client().prepareUpdate("test", "test", "1").setDoc("foo", "baz").setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) + .get(); + assertEquals(2, update.getVersion()); + assertFalse("request shouldn't have forced a refresh", update.forcedRefresh()); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "baz")).get(), "1"); + + // Upsert with RefreshPolicy.WAIT_UNTIL + update = client().prepareUpdate("test", "test", "2").setDocAsUpsert(true).setDoc("foo", "cat") + .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).get(); + assertEquals(1, update.getVersion()); + assertFalse("request shouldn't have forced a refresh", update.forcedRefresh()); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "cat")).get(), "2"); + + // Update-becomes-delete with RefreshPolicy.WAIT_UNTIL + update = client().prepareUpdate("test", "test", "2").setScript(new Script("delete_plz", ScriptType.INLINE, "native", emptyMap())) + .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).get(); + assertEquals(2, update.getVersion()); + assertFalse("request shouldn't have forced a refresh", update.forcedRefresh()); + assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "cat")).get()); + } + + public void testBulk() { + // Index by bulk with RefreshPolicy.WAIT_UNTIL + BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); + bulk.add(client().prepareIndex("test", "test", "1").setSource("foo", "bar")); + assertBulkSuccess(bulk.get()); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); + + // Update by bulk with RefreshPolicy.WAIT_UNTIL + bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); + 
bulk.add(client().prepareUpdate("test", "test", "1").setDoc("foo", "baz")); + assertBulkSuccess(bulk.get()); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "baz")).get(), "1"); + + // Delete by bulk with RefreshPolicy.WAIT_UNTIL + bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); + bulk.add(client().prepareDelete("test", "test", "1")); + assertBulkSuccess(bulk.get()); + assertNoSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get()); + + // Update makes a noop + bulk = client().prepareBulk().setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); + bulk.add(client().prepareDelete("test", "test", "1")); + assertBulkSuccess(bulk.get()); + } + + /** + * Tests that an explicit request makes block_until_refresh return. It doesn't check that block_until_refresh doesn't return until the + * explicit refresh if the interval is -1 because we don't have that kind of control over refresh. It can happen all on its own. + */ + public void testNoRefreshInterval() throws InterruptedException, ExecutionException { + client().admin().indices().prepareUpdateSettings("test").setSettings(singletonMap("index.refresh_interval", -1)).get(); + ListenableActionFuture index = client().prepareIndex("test", "index", "1").setSource("foo", "bar") + .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).execute(); + while (false == index.isDone()) { + client().admin().indices().prepareRefresh("test").get(); + } + assertEquals(RestStatus.CREATED, index.get().status()); + assertFalse("request shouldn't have forced a refresh", index.get().forcedRefresh()); + assertSearchHits(client().prepareSearch("test").setQuery(matchQuery("foo", "bar")).get(), "1"); + } + + private void assertBulkSuccess(BulkResponse response) { + assertNoFailures(response); + for (BulkItemResponse item : response) { + assertFalse("request shouldn't have forced a refresh", item.getResponse().forcedRefresh()); + } + } + + @Override + protected Collection> nodePlugins() { 
+ return singleton(DeletePlzPlugin.class); + } + + public static class DeletePlzPlugin extends Plugin { + @Override + public String name() { + return "delete_please"; + } + + @Override + public String description() { + return "adds a script that converts any update into a delete for testing"; + } + + public void onModule(ScriptModule scriptModule) { + scriptModule.registerScript("delete_plz", DeletePlzFactory.class); + } + } + + public static class DeletePlzFactory implements NativeScriptFactory { + @Override + public ExecutableScript newScript(Map params) { + return new ExecutableScript() { + private Map ctx; + + @Override + @SuppressWarnings("unchecked") // Elasicsearch convention + public void setNextVar(String name, Object value) { + if (name.equals("ctx")) { + ctx = (Map) value; + } + } + + @Override + public Object run() { + ctx.put("op", "delete"); + return null; + } + }; + } + + @Override + public boolean needsScores() { + return false; + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 51df3ee0386..b7da3860003 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -170,6 +170,8 @@ public class InternalEngineTests extends ESTestCase { .put(IndexSettings.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD, + between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) .build()); // TODO randomize more settings threadPool = new ThreadPool(getClass().getName()); store = createStore(); @@ -200,7 +202,7 @@ public class InternalEngineTests extends ESTestCase { return new EngineConfig(openMode, 
config.getShardId(), config.getThreadPool(), config.getIndexSettings(), config.getWarmer(), config.getStore(), config.getDeletionPolicy(), config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), config.getQueryCache(), - config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter()); + config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners()); } @Override @@ -291,14 +293,16 @@ public class InternalEngineTests extends ESTestCase { } catch (IOException e) { throw new ElasticsearchException("can't find index?", e); } - EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings - , null, store, createSnapshotDeletionPolicy(), mergePolicy, - iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), new Engine.EventListener() { + Engine.EventListener listener = new Engine.EventListener() { @Override public void onFailedEngine(String reason, @Nullable Throwable t) { // we don't need to notify anybody in this test } - }, new TranslogHandler(shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5)); + }; + EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, createSnapshotDeletionPolicy(), + mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, + new TranslogHandler(shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), null); return config; } @@ -309,18 +313,20 @@ public class InternalEngineTests extends ESTestCase { public void testSegments() throws Exception { try (Store store = createStore(); - Engine engine = 
createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { + Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { List segments = engine.segments(false); assertThat(segments.isEmpty(), equalTo(true)); assertThat(engine.segmentsStats(false).getCount(), equalTo(0L)); assertThat(engine.segmentsStats(false).getMemoryInBytes(), equalTo(0L)); - // create a doc and refresh + // create two docs and refresh ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - engine.index(new Engine.Index(newUid("1"), doc)); - + Engine.Index first = new Engine.Index(newUid("1"), doc); + engine.index(first); ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null); - engine.index(new Engine.Index(newUid("2"), doc2)); + Engine.Index second = new Engine.Index(newUid("2"), doc2); + engine.index(second); + assertThat(second.getTranslogLocation(), greaterThan(first.getTranslogLocation())); engine.refresh("test"); segments = engine.segments(false); @@ -2064,10 +2070,11 @@ public class InternalEngineTests extends ESTestCase { /* create a TranslogConfig that has been created with a different UUID */ TranslogConfig translogConfig = new TranslogConfig(shardId, translog.location(), config.getIndexSettings(), BigArrays.NON_RECYCLING_INSTANCE); - EngineConfig brokenConfig = new EngineConfig(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, shardId, threadPool, config.getIndexSettings() - , null, store, createSnapshotDeletionPolicy(), newMergePolicy(), - config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener() - , config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5)); + EngineConfig brokenConfig = new EngineConfig(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, 
shardId, threadPool, + config.getIndexSettings(), null, store, createSnapshotDeletionPolicy(), newMergePolicy(), config.getAnalyzer(), + config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), + IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, + TimeValue.timeValueMinutes(5), config.getRefreshListeners()); try { InternalEngine internalEngine = new InternalEngine(brokenConfig); diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index f71f2d72019..bed597b7cc5 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.internal.SourceFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.shard.RefreshListeners; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardUtils; @@ -213,7 +214,7 @@ public class ShadowEngineTests extends ESTestCase { } protected ShadowEngine createShadowEngine(IndexSettings indexSettings, Store store) { - return new ShadowEngine(config(indexSettings, store, null, null)); + return new ShadowEngine(config(indexSettings, store, null, null, null)); } protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store store, Path translogPath) { @@ -221,11 +222,12 @@ public class ShadowEngineTests extends ESTestCase { } protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) { - EngineConfig config = config(indexSettings, store, translogPath, 
mergePolicy); + EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, null); return new InternalEngine(config); } - public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) { + public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, + RefreshListeners refreshListeners) { IndexWriterConfig iwc = newIndexWriterConfig(); final EngineConfig.OpenMode openMode; try { @@ -237,14 +239,17 @@ public class ShadowEngineTests extends ESTestCase { } catch (IOException e) { throw new ElasticsearchException("can't find index?", e); } - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); - EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings - , null, store, createSnapshotDeletionPolicy(), mergePolicy, - iwc.getAnalyzer(), iwc.getSimilarity() , new CodecService(null, logger), new Engine.EventListener() { + Engine.EventListener eventListener = new Engine.EventListener() { @Override public void onFailedEngine(String reason, @Nullable Throwable t) { // we don't need to notify anybody in this test - }}, null, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5)); + } + }; + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, createSnapshotDeletionPolicy(), + mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, null, + IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, + TimeValue.timeValueMinutes(5), refreshListeners); return config; } @@ -1011,4 +1016,11 @@ public class ShadowEngineTests extends ESTestCase { 
assertEquals(0, docStats.getDeleted()); primaryEngine.forceMerge(randomBoolean(), 1, false, false, false); } + + public void testRefreshListenersFails() throws IOException { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), + new RefreshListeners(null, null, null, logger)); + Exception e = expectThrows(IllegalArgumentException.class, () -> new ShadowEngine(config)); + assertEquals("ShadowEngine doesn't support RefreshListeners", e.getMessage()); + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/AllFieldMapperPositionIncrementGapTests.java b/core/src/test/java/org/elasticsearch/index/mapper/all/AllFieldMapperPositionIncrementGapTests.java index 702c9b85da4..7b106863341 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/all/AllFieldMapperPositionIncrementGapTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/all/AllFieldMapperPositionIncrementGapTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper.all; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.client.Client; import org.elasticsearch.index.query.MatchPhraseQueryBuilder; import org.elasticsearch.plugins.Plugin; @@ -87,7 +88,7 @@ public class AllFieldMapperPositionIncrementGapTests extends ESSingleNodeTestCas private static void testGap(Client client, String indexName, String type, int positionIncrementGap) throws IOException { client.prepareIndex(indexName, type, "position_gap_test") - .setSource("string1", "one", "string2", "two three").setRefresh(true).get(); + .setSource("string1", "one", "string2", "two three").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); // Baseline - phrase query finds matches in the same field value assertHitCount(client.prepareSearch(indexName) diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java new file mode 100644 index 
00000000000..5c83e6805ab --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -0,0 +1,292 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy; +import org.apache.lucene.index.SnapshotDeletionPolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import 
org.elasticsearch.index.codec.CodecService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.engine.InternalEngineTests.TranslogHandler; +import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; +import org.elasticsearch.index.mapper.ParseContext.Document; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.store.DirectoryService; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.TranslogConfig; +import org.elasticsearch.test.DummyShardLock; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; + +/** + * Tests how {@linkplain RefreshListeners} interacts with {@linkplain InternalEngine}. 
+ */ +public class RefreshListenersTests extends ESTestCase { + private RefreshListeners listeners; + private Engine engine; + private volatile int maxListeners; + private ThreadPool threadPool; + private Store store; + + @Before + public void setupListeners() throws Exception { + // Setup dependencies of the listeners + maxListeners = randomIntBetween(1, 1000); + listeners = new RefreshListeners( + () -> maxListeners, + () -> engine.refresh("too-many-listeners"), + // Immediately run listeners rather than adding them to the listener thread pool like IndexShard does to simplify the test. + Runnable::run, + logger + ); + + // Now setup the InternalEngine which is much more complicated because we aren't mocking anything + threadPool = new ThreadPool(getTestName()); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY); + ShardId shardId = new ShardId(new Index("index", "_na_"), 1); + Directory directory = newDirectory(); + DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { + @Override + public Directory newDirectory() throws IOException { + return directory; + } + + @Override + public long throttleTimeInNanos() { + return 0; + } + }; + store = new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); + IndexWriterConfig iwc = newIndexWriterConfig(); + TranslogConfig translogConfig = new TranslogConfig(shardId, createTempDir("translog"), indexSettings, + BigArrays.NON_RECYCLING_INSTANCE); + Engine.EventListener eventListener = new Engine.EventListener() { + @Override + public void onFailedEngine(String reason, @Nullable Throwable t) { + // we don't need to notify anybody in this test + } + }; + EngineConfig config = new EngineConfig(EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, shardId, threadPool, indexSettings, null, + store, new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()), newMergePolicy(), iwc.getAnalyzer(), + iwc.getSimilarity(), new 
CodecService(null, logger), eventListener, new TranslogHandler(shardId.getIndexName(), logger), + IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, + TimeValue.timeValueMinutes(5), listeners); + engine = new InternalEngine(config); + } + + @After + public void tearDownListeners() throws Exception { + IOUtils.close(engine, store); + terminate(threadPool); + } + + public void testTooMany() throws Exception { + assertFalse(listeners.refreshNeeded()); + Engine.Index index = index("1"); + + // Fill the listener slots + List nonForcedListeners = new ArrayList<>(maxListeners); + for (int i = 0; i < maxListeners; i++) { + DummyRefreshListener listener = new DummyRefreshListener(); + nonForcedListeners.add(listener); + listeners.addOrNotify(index.getTranslogLocation(), listener); + assertTrue(listeners.refreshNeeded()); + } + + // We shouldn't have called any of them + for (DummyRefreshListener listener : nonForcedListeners) { + assertNull("Called listener too early!", listener.forcedRefresh.get()); + } + + // Add one more listener which should cause a refresh. + DummyRefreshListener forcingListener = new DummyRefreshListener(); + listeners.addOrNotify(index.getTranslogLocation(), forcingListener); + assertTrue("Forced listener wasn't forced?", forcingListener.forcedRefresh.get()); + + // That forces all the listeners through. It would be on the listener ThreadPool but we've made all of those execute immediately. 
+ for (DummyRefreshListener listener : nonForcedListeners) { + assertEquals("Expected listener called with unforced refresh!", Boolean.FALSE, listener.forcedRefresh.get()); + } + assertFalse(listeners.refreshNeeded()); + } + + public void testAfterRefresh() throws Exception { + Engine.Index index = index("1"); + engine.refresh("I said so"); + if (randomBoolean()) { + index(randomFrom("1" /* same document */, "2" /* different document */)); + if (randomBoolean()) { + engine.refresh("I said so"); + } + } + + DummyRefreshListener listener = new DummyRefreshListener(); + listeners.addOrNotify(index.getTranslogLocation(), listener); + assertFalse(listener.forcedRefresh.get()); + } + + /** + * Attempts to add a listener at the same time as a refresh occurs by having a background thread force a refresh as fast as it can while + * adding listeners. This can catch the situation where a refresh happens right as the listener is being added such that the listener + * misses the refresh and has to catch the next one. If the listener wasn't able to properly catch the next one then this would fail. + */ + public void testConcurrentRefresh() throws Exception { + AtomicBoolean run = new AtomicBoolean(true); + Thread refresher = new Thread(() -> { + while (run.get()) { + engine.refresh("test"); + } + }); + refresher.start(); + try { + for (int i = 0; i < 100; i++) { + Engine.Index index = index("1"); + + DummyRefreshListener listener = new DummyRefreshListener(); + listeners.addOrNotify(index.getTranslogLocation(), listener); + assertBusy(() -> assertNotNull(listener.forcedRefresh.get())); + assertFalse(listener.forcedRefresh.get()); + } + } finally { + run.set(false); + refresher.join(); + } + } + + /** + * Uses a bunch of threads to index, wait for refresh, and non-realtime get documents to validate that they are visible after waiting + * regardless of what crazy sequence of events causes the refresh listener to fire. 
+ */ + public void testLotsOfThreads() throws Exception { + int threadCount = between(3, 10); + maxListeners = between(1, threadCount * 2); + + // This thread just refreshes every once in a while to cause trouble. + ScheduledFuture refresher = threadPool.scheduleWithFixedDelay(() -> engine.refresh("because test"), timeValueMillis(100)); + + // These threads add and block until the refresh makes the change visible and then do a non-realtime get. + Thread[] indexers = new Thread[threadCount]; + for (int thread = 0; thread < threadCount; thread++) { + final String threadId = String.format(Locale.ROOT, "%04d", thread); + indexers[thread] = new Thread(() -> { + for (int iteration = 1; iteration <= 50; iteration++) { + try { + String testFieldValue = String.format(Locale.ROOT, "%s%04d", threadId, iteration); + Engine.Index index = index(threadId, testFieldValue); + assertEquals(iteration, index.version()); + + DummyRefreshListener listener = new DummyRefreshListener(); + listeners.addOrNotify(index.getTranslogLocation(), listener); + assertBusy(() -> assertNotNull("listener never called", listener.forcedRefresh.get())); + if (threadCount < maxListeners) { + assertFalse(listener.forcedRefresh.get()); + } + + Engine.Get get = new Engine.Get(false, index.uid()); + try (Engine.GetResult getResult = engine.get(get)) { + assertTrue("document not found", getResult.exists()); + assertEquals(iteration, getResult.version()); + SingleFieldsVisitor visitor = new SingleFieldsVisitor("test"); + getResult.docIdAndVersion().context.reader().document(getResult.docIdAndVersion().docId, visitor); + assertEquals(Arrays.asList(testFieldValue), visitor.fields().get("test")); + } + } catch (Throwable t) { + throw new RuntimeException("failure on the [" + iteration + "] iteration of thread [" + threadId + "]", t); + } + } + }); + indexers[thread].start(); + } + + for (Thread indexer: indexers) { + indexer.join(); + } + FutureUtils.cancel(refresher); + } + + private Engine.Index index(String id) 
{ + return index(id, "test"); + } + + private Engine.Index index(String id, String testFieldValue) { + String type = "test"; + String uid = type + ":" + id; + Document document = new Document(); + document.add(new TextField("test", testFieldValue, Field.Store.YES)); + Field uidField = new Field("_uid", type + ":" + id, UidFieldMapper.Defaults.FIELD_TYPE); + Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY); + document.add(uidField); + document.add(versionField); + BytesReference source = new BytesArray(new byte[] { 1 }); + ParsedDocument doc = new ParsedDocument(versionField, id, type, null, -1, -1, Arrays.asList(document), source, null); + Engine.Index index = new Engine.Index(new Term("_uid", uid), doc); + engine.index(index); + return index; + } + + private static class DummyRefreshListener implements Consumer { + /** + * When the listener is called this captures it's only argument. + */ + private AtomicReference forcedRefresh = new AtomicReference<>(); + + @Override + public void accept(Boolean forcedRefresh) { + assertNotNull(forcedRefresh); + Boolean oldValue = this.forcedRefresh.getAndSet(forcedRefresh); + assertNull("Listener called twice", oldValue); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index b4d2423921c..889897e21f6 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.translog; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.Term; import org.apache.lucene.mockfile.FilterFileChannel; @@ -44,6 +45,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import 
org.elasticsearch.index.VersionType; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.hamcrest.Matchers; @@ -204,18 +206,40 @@ public class TranslogTests extends ESTestCase { } public void testRead() throws IOException { + Location loc0 = translog.getLastWriteLocation(); + assertNotNull(loc0); + Translog.Location loc1 = translog.add(new Translog.Index("test", "1", new byte[]{1})); + assertThat(loc1, greaterThan(loc0)); + assertThat(translog.getLastWriteLocation(), greaterThan(loc1)); Translog.Location loc2 = translog.add(new Translog.Index("test", "2", new byte[]{2})); + assertThat(loc2, greaterThan(loc1)); + assertThat(translog.getLastWriteLocation(), greaterThan(loc2)); assertThat(translog.read(loc1).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{1}))); assertThat(translog.read(loc2).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{2}))); + + Translog.Location lastLocBeforeSync = translog.getLastWriteLocation(); translog.sync(); + assertEquals(lastLocBeforeSync, translog.getLastWriteLocation()); assertThat(translog.read(loc1).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{1}))); assertThat(translog.read(loc2).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{2}))); + Translog.Location loc3 = translog.add(new Translog.Index("test", "2", new byte[]{3})); + assertThat(loc3, greaterThan(loc2)); + assertThat(translog.getLastWriteLocation(), greaterThan(loc3)); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); + + lastLocBeforeSync = translog.getLastWriteLocation(); translog.sync(); + assertEquals(lastLocBeforeSync, translog.getLastWriteLocation()); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); 
translog.prepareCommit(); + /* + * The commit adds to the lastWriteLocation even though is isn't really a write. This is just an implementation artifact but it can + * safely be ignored because the lastWriteLocation continues to be greater than the Location returned from the last write operation + * and less than the location of the next write operation. + */ + assertThat(translog.getLastWriteLocation(), greaterThan(lastLocBeforeSync)); assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); translog.commit(); assertNull(translog.read(loc1)); diff --git a/core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java index 6a4de4b9ff2..6a4d965706a 100644 --- a/core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java +++ b/core/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.routing; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; @@ -45,7 +46,7 @@ public class AliasRoutingIT extends ESIntegTestCase { assertAcked(admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test", "alias0").routing("0"))); logger.info("--> indexing with id [1], and routing [0] using alias"); - client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", 
"1").execute().actionGet().isExists(), equalTo(false)); @@ -72,7 +73,7 @@ public class AliasRoutingIT extends ESIntegTestCase { logger.info("--> deleting with no routing, should not delete anything"); - client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet(); + client().prepareDelete("test", "type1", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); @@ -80,7 +81,7 @@ public class AliasRoutingIT extends ESIntegTestCase { } logger.info("--> deleting with routing alias, should delete"); - client().prepareDelete("alias0", "type1", "1").setRefresh(true).execute().actionGet(); + client().prepareDelete("alias0", "type1", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false)); @@ -88,7 +89,7 @@ public class AliasRoutingIT extends ESIntegTestCase { } logger.info("--> indexing with id [1], and routing [0] using alias"); - client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -110,7 +111,7 @@ public class AliasRoutingIT extends ESIntegTestCase { .addAliasAction(newAddAliasAction("test", "alias01").searchRouting("0,1"))); logger.info("--> indexing with id 
[1], and routing [0] using alias"); - client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -142,7 +143,7 @@ public class AliasRoutingIT extends ESIntegTestCase { } logger.info("--> indexing with id [2], and routing [1] using alias"); - client().prepareIndex("alias1", "type1", "2").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias1", "type1", "2").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> search with no routing, should fine two"); for (int i = 0; i < 5; i++) { @@ -207,7 +208,7 @@ public class AliasRoutingIT extends ESIntegTestCase { .addAliasAction(newAddAliasAction("test-b", "alias-ab").searchRouting("1"))); ensureGreen(); // wait for events again to make sure we got the aliases on all nodes logger.info("--> indexing with id [1], and routing [0] using alias to test-a"); - client().prepareIndex("alias-a0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias-a0", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test-a", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -218,7 +219,7 @@ public class AliasRoutingIT extends ESIntegTestCase { } logger.info("--> indexing with id [0], and routing [1] using alias to test-b"); - client().prepareIndex("alias-b1", "type1", "1").setSource("field", 
"value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias-b1", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test-a", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -261,9 +262,9 @@ public class AliasRoutingIT extends ESIntegTestCase { .addAliasAction(newAddAliasAction("index", "index_1").routing("1"))); logger.info("--> indexing on index_1 which is an alias for index with routing [1]"); - client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> indexing on index_2 which is a concrete index"); - client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefresh(true).execute().actionGet(); + client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> search all on index_* should find two"); @@ -286,9 +287,9 @@ public class AliasRoutingIT extends ESIntegTestCase { .addAliasAction(newAddAliasAction("index", "index_1").routing("1"))); logger.info("--> indexing on index_1 which is an alias for index with routing [1]"); - client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> indexing on index_2 which is a concrete index"); - client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefresh(true).execute().actionGet(); + client().prepareIndex("index_2", "type2", "2").setSource("field", 
"value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); SearchResponse searchResponse = client().prepareSearch("index_*").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); @@ -307,7 +308,7 @@ public class AliasRoutingIT extends ESIntegTestCase { .addAliasAction(newAddAliasAction("test", "alias").routing("3"))); logger.info("--> indexing with id [0], and routing [3]"); - client().prepareIndex("alias", "type1", "0").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias", "type1", "0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); logger.info("--> verifying get and search with routing, should find"); @@ -332,7 +333,7 @@ public class AliasRoutingIT extends ESIntegTestCase { .addAliasAction(newAddAliasAction("test", "alias").searchRouting("3,4").indexRouting("4"))); logger.info("--> indexing with id [1], and routing [4]"); - client().prepareIndex("alias", "type1", "1").setSource("field", "value2").setRefresh(true).execute().actionGet(); + client().prepareIndex("alias", "type1", "1").setSource("field", "value2").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should not find anything"); logger.info("--> verifying get and search with routing, should find"); diff --git a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java index 03e6cbf9ef1..d8cf1e7b5ec 100644 --- a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetRequest; import 
org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsResponse; @@ -56,7 +57,8 @@ public class SimpleRoutingIT extends ESIntegTestCase { ensureGreen(); logger.info("--> indexing with id [1], and routing [0]"); - client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -67,21 +69,22 @@ public class SimpleRoutingIT extends ESIntegTestCase { } logger.info("--> deleting with no routing, should not delete anything"); - client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet(); + client().prepareDelete("test", "type1", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); } logger.info("--> deleting with routing, should delete"); - client().prepareDelete("test", "type1", "1").setRouting("0").setRefresh(true).execute().actionGet(); + client().prepareDelete("test", "type1", "1").setRouting("0").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); assertThat(client().prepareGet("test", "type1", 
"1").setRouting("0").execute().actionGet().isExists(), equalTo(false)); } logger.info("--> indexing with id [1], and routing [0]"); - client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -97,7 +100,8 @@ public class SimpleRoutingIT extends ESIntegTestCase { ensureGreen(); logger.info("--> indexing with id [1], and routing [0]"); - client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); logger.info("--> verifying get with no routing, should not find anything"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); @@ -125,7 +129,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { } logger.info("--> indexing with id [2], and routing [1]"); - client().prepareIndex("test", "type1", "2").setRouting("1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex("test", "type1", "2").setRouting("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> search with no routing, should fine two"); for (int i = 0; i < 5; i++) { @@ -165,12 +169,13 @@ public class SimpleRoutingIT extends ESIntegTestCase { ensureGreen(); logger.info("--> indexing with id [1], and routing [0]"); - client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("0").setSource("field", 
"value1").setRefresh(true).execute().actionGet(); + client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("0").setSource("field", "value1") + .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with no routing, should fail"); logger.info("--> indexing with id [1], with no routing, should fail"); try { - client().prepareIndex(indexOrAlias(), "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); + client().prepareIndex(indexOrAlias(), "type1", "1").setSource("field", "value1").get(); fail("index with missing routing when routing is required should fail"); } catch (ElasticsearchException e) { assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class)); @@ -183,7 +188,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { logger.info("--> deleting with no routing, should fail"); try { - client().prepareDelete(indexOrAlias(), "type1", "1").setRefresh(true).execute().actionGet(); + client().prepareDelete(indexOrAlias(), "type1", "1").get(); fail("delete with missing routing when routing is required should fail"); } catch (ElasticsearchException e) { assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class)); @@ -223,7 +228,7 @@ public class SimpleRoutingIT extends ESIntegTestCase { assertThat(getResponse.getSourceAsMap().get("field"), equalTo("value2")); } - client().prepareDelete(indexOrAlias(), "type1", "1").setRouting("0").setRefresh(true).execute().actionGet(); + client().prepareDelete(indexOrAlias(), "type1", "1").setRouting("0").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); for (int i = 0; i < 5; i++) { try { @@ -320,7 +325,8 @@ public class SimpleRoutingIT extends ESIntegTestCase { logger.info("--> indexing with id [1], and routing [0]"); client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("0").setSource("field", "value1").get(); logger.info("--> indexing with id [2], and routing [0]"); - client().prepareIndex(indexOrAlias(), "type1", 
"2").setRouting("0").setSource("field", "value2").setRefresh(true).get(); + client().prepareIndex(indexOrAlias(), "type1", "2").setRouting("0").setSource("field", "value2") + .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); logger.info("--> verifying get with id [1] with routing [0], should succeed"); assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java index 90f5c65c066..f8ca1e1aaf7 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery; import org.elasticsearch.common.settings.Settings; @@ -754,10 +755,11 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertNoFailures(response); assertThat(response.getHits().totalHits(), equalTo(0L)); - client().prepareIndex("test", "child1").setSource(jsonBuilder().startObject().field("text", "value").endObject()).setRefresh(true) - .get(); + client().prepareIndex("test", "child1").setSource(jsonBuilder().startObject().field("text", "value").endObject()) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); - response = client().prepareSearch("test").setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value"), ScoreMode.None)).get(); + response = client().prepareSearch("test") + 
.setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value"), ScoreMode.None)).get(); assertNoFailures(response); assertThat(response.getHits().totalHits(), equalTo(0L)); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java index 812e43918d9..484edf61f7b 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearch2xIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; @@ -473,7 +474,7 @@ public class CompletionSuggestSearch2xIT extends ESIntegTestCase { .setSettings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, PRE2X_VERSION.id)) .addMapping(TYPE, mapping)); client().prepareIndex(INDEX, TYPE, "1") - .setRefresh(true) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get(); ensureGreen(INDEX); @@ -496,7 +497,7 @@ public class CompletionSuggestSearch2xIT extends ESIntegTestCase { ).execute().actionGet(); assertSuggestions(suggestResponse, "suggs"); - client().prepareIndex(INDEX, TYPE, "1").setRefresh(true) + client().prepareIndex(INDEX, TYPE, "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get(); ensureGreen(INDEX); @@ -522,7 +523,7 @@ public class CompletionSuggestSearch2xIT extends 
ESIntegTestCase { .addMapping(TYPE, mapping) .setSettings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, PRE2X_VERSION.id))); client().prepareIndex(INDEX, TYPE, "1") - .setRefresh(true) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get(); ensureGreen(INDEX); @@ -545,7 +546,7 @@ public class CompletionSuggestSearch2xIT extends ESIntegTestCase { ).execute().actionGet(); assertSuggestions(suggestResponse, "suggs"); - client().prepareIndex(INDEX, TYPE, "1").setRefresh(true) + client().prepareIndex(INDEX, TYPE, "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get(); ensureGreen(INDEX); @@ -731,10 +732,10 @@ public class CompletionSuggestSearch2xIT extends ESIntegTestCase { assertThat(putMappingResponse.isAcknowledged(), is(true)); // Index two entities - client().prepareIndex(INDEX, TYPE, "1").setRefresh(true) + client().prepareIndex(INDEX, TYPE, "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").field(otherField, "WHATEVER").endObject()) .get(); - client().prepareIndex(INDEX, TYPE, "2").setRefresh(true) + client().prepareIndex(INDEX, TYPE, "2").setRefreshPolicy(RefreshPolicy.IMMEDIATE) .setSource(jsonBuilder().startObject().field(FIELD, "Bar Fighters").field(otherField, "WHATEVER2").endObject()) .get(); @@ -1040,7 +1041,7 @@ public class CompletionSuggestSearch2xIT extends ESIntegTestCase { .startArray("input").value(str).endArray() .field("output", "foobar") .endObject().endObject() - ).setRefresh(true).get(); + ).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); // need to flush and refresh, because we keep changing the same document // we have to make sure that segments without any live documents are deleted flushAndRefresh(); @@ -1074,7 +1075,7 @@ public class CompletionSuggestSearch2xIT extends ESIntegTestCase { 
.startArray("input").value(longString).endArray() .field("output", "foobar") .endObject().endObject() - ).setRefresh(true).get(); + ).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); } @@ -1096,7 +1097,7 @@ public class CompletionSuggestSearch2xIT extends ESIntegTestCase { .startArray("input").value(string).endArray() .field("output", "foobar") .endObject().endObject() - ).setRefresh(true).get(); + ).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); fail("expected MapperParsingException"); } catch (MapperParsingException expected) {} } @@ -1116,7 +1117,7 @@ public class CompletionSuggestSearch2xIT extends ESIntegTestCase { .startObject() .field(FIELD, string) .endObject() - ).setRefresh(true).get(); + ).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); try { client().prepareSearch(INDEX).addAggregation(AggregationBuilders.terms("suggest_agg").field(FIELD) @@ -1148,11 +1149,11 @@ public class CompletionSuggestSearch2xIT extends ESIntegTestCase { ensureGreen(); client().prepareIndex(INDEX, TYPE, "1").setSource(FIELD, "strings make me happy", FIELD + "_1", "nulls make me sad") - .setRefresh(true).get(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); try { client().prepareIndex(INDEX, TYPE, "2").setSource(FIELD, null, FIELD + "_1", "nulls make me sad") - .setRefresh(true).get(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); fail("Expected MapperParsingException for null value"); } catch (MapperParsingException e) { // make sure that the exception has the name of the field causing the error diff --git a/docs/reference/docs.asciidoc b/docs/reference/docs.asciidoc index f3b30e7f0c3..04049663e84 100644 --- a/docs/reference/docs.asciidoc +++ b/docs/reference/docs.asciidoc @@ -15,6 +15,8 @@ This section describes the following CRUD APIs: .Multi-document APIs * <> * <> +* <> +* <> NOTE: All CRUD APIs are single-index APIs. The `index` parameter accepts a single index name, or an `alias` which points to a single index. 
@@ -42,3 +44,5 @@ include::docs/reindex.asciidoc[] include::docs/termvectors.asciidoc[] include::docs/multi-termvectors.asciidoc[] + +include::docs/refresh.asciidoc[] diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index ee8f75c6cdd..6b0a5b2e40d 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -167,12 +167,8 @@ are the same). [[bulk-refresh]] === Refresh -The `refresh` parameter can be set to `true` in order to refresh the relevant -primary and replica shards immediately after the bulk operation has occurred -and make it searchable, instead of waiting for the normal refresh interval to -expire. Setting it to `true` can trigger additional load, and may slow down -indexing. Due to its costly nature, the `refresh` parameter is set on the bulk request level -and is not supported on each individual bulk item. +Control when the changes made by this request are visible to search. See +<>. [float] [[bulk-update]] diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index 175c07d005e..18a370fc416 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -113,11 +113,9 @@ is the same). [[delete-refresh]] === Refresh -The `refresh` parameter can be set to `true` in order to refresh the relevant -primary and replica shards after the delete operation has occurred and make it -searchable. Setting it to `true` should be done after careful thought and -verification that this does not cause a heavy load on the system (and slows -down indexing). +Control when the changes made by this request are visible to search. See +<>. 
+ [float] [[delete-timeout]] diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index eb1e45d6160..aa62b65292e 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -30,7 +30,8 @@ The result of the above index operation is: "_type" : "tweet", "_id" : "1", "_version" : 1, - "created" : true + "created" : true, + "forced_refresh": false } -------------------------------------------------- // TESTRESPONSE[s/"successful" : 2/"successful" : 1/] @@ -221,7 +222,8 @@ The result of the above index operation is: "_type" : "tweet", "_id" : "6a8ca01c-7896-48e9-81cc-9f70661fcb32", "_version" : 1, - "created" : true + "created" : true, + "forced_refresh": false } -------------------------------------------------- // TESTRESPONSE[s/6a8ca01c-7896-48e9-81cc-9f70661fcb32/$body._id/ s/"successful" : 2/"successful" : 1/] @@ -385,13 +387,8 @@ replication group have indexed the document (sync replication). [[index-refresh]] === Refresh -To refresh the shard (not the whole index) immediately after the operation -occurs, so that the document appears in search results immediately, the -`refresh` parameter can be set to `true`. Setting this option to `true` should -*ONLY* be done after careful thought and verification that it does not lead to -poor performance, both from an indexing and a search standpoint. Note, getting -a document using the get API is completely realtime and doesn't require a -refresh. +Control when the changes made by this request are visible to search. See +<>. [float] [[index-noop]] diff --git a/docs/reference/docs/refresh.asciidoc b/docs/reference/docs/refresh.asciidoc new file mode 100644 index 00000000000..3e5153341c8 --- /dev/null +++ b/docs/reference/docs/refresh.asciidoc @@ -0,0 +1,109 @@ +[[docs-refresh]] +== `?refresh` + +The <>, <>, <>, and +<> APIs support setting `refresh` to control when changes made +by this request are made visible to search. 
These are the allowed values: + +Empty string or `true`:: + +Refresh the relevant primary and replica shards (not the whole index) +immediately after the operation occurs, so that the updated document appears +in search results immediately. This should *ONLY* be done after careful thought +and verification that it does not lead to poor performance, both from an +indexing and a search standpoint. + +`wait_for`:: + +Wait for the changes made by the request to be made visible by a refresh before +replying. This doesn't force an immediate refresh, rather, it waits for a +refresh to happen. Elasticsearch automatically refreshes shards that have changed +every `index.refresh_interval` which defaults to one second. That setting is +<>. The <> API will also +cause the request to return, as will setting `refresh` to `true` on any of the +APIs that support it. + +`false` (the default):: + +Take no refresh related actions. The changes made by this request will be made +visible at some point after the request returns. + +=== Choosing which setting to use + +Unless you have a good reason to wait for the change to become visible always +use `refresh=false`, or, because that is the default, just leave the `refresh` +parameter out of the URL. That is the simplest and fastest choice. + +If you absolutely must have the changes made by a request visible synchronously +with the request then you must pick between putting more load on +Elasticsearch (`true`) and waiting longer for the response (`wait_for`). Here +are a few points that should inform that decision: + +* The more changes being made to the index the more work `wait_for` saves +compared to `true`. In the case that the index is only changed once every +`index.refresh_interval` then it saves no work. +* `true` creates less efficient index constructs (tiny segments) that must +later be merged into more efficient index constructs (larger segments). 
Meaning +that the cost of `true` is paid at index time to create the tiny segment, at +search time to search the tiny segment, and at merge time to make the larger +segments. +* Never start multiple `refresh=wait_for` requests in a row. Instead batch them +into a single bulk request with `refresh=wait_for` and Elasticsearch will start +them all in parallel and return only when they have all finished. +* If the refresh interval is set to `-1`, disabling the automatic refreshes, +then requests with `refresh=wait_for` will wait indefinitely until some action +causes a refresh. Conversely, setting `index.refresh_interval` to something +shorter than the default like `200ms` will make `refresh=wait_for` come back +faster, but it'll still generate inefficient segments. +* `refresh=wait_for` only affects the request that it is on, but, by forcing a +refresh immediately, `refresh=true` will affect other ongoing requests. In +general, if you have a running system you don't wish to disturb then +`refresh=wait_for` is a smaller modification. + +=== `refresh=wait_for` Can Force a Refresh + +If a `refresh=wait_for` request comes in when there are already +`index.max_refresh_listeners` (defaults to 1000) requests waiting for a refresh +on that shard then that request will behave just as though it had `refresh` set +to `true` instead: it will force a refresh. This keeps the promise that when a +`refresh=wait_for` request returns that its changes are visible for search +while preventing unchecked resource usage for blocked requests. If a request +forced a refresh because it ran out of listener slots then its response will +contain `"forced_refresh": true`. + +Bulk requests only take up one slot on each shard that they touch no matter how +many times they modify the shard. 
+ +=== Examples + +These will create a document and immediately refresh the index so it is visible: + +[source,js] +-------------------------------------------------- +PUT /test/test/1?refresh +{"test": "test"} +PUT /test/test/2?refresh=true +{"test": "test"} +-------------------------------------------------- +// CONSOLE + +These will create a document without doing anything to make it visible for +search: + +[source,js] +-------------------------------------------------- +PUT /test/test/3 +{"test": "test"} +PUT /test/test/4?refresh=false +{"test": "test"} +-------------------------------------------------- +// CONSOLE + +This will create a document and wait for it to become visible for search: + +[source,js] +-------------------------------------------------- +PUT /test/test/4?refresh=wait_for +{"test": "test"} +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 405f9b0494b..39261c5d21f 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -235,9 +235,8 @@ The write consistency of the index/delete operation. `refresh`:: -Refresh the relevant primary and replica shards (not the whole index) -immediately after the operation occurs, so that the updated document appears -in search results immediately. +Control when the changes made by this request are visible to search. See +<>. `fields`:: diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index c7119af4168..3cc95e5af9a 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -136,6 +136,11 @@ specific index module: experimental[] Disables the purge of <> on the current index. +`index.max_refresh_listeners`:: + + Maximum number of refresh listeners available on each shard of the index. + These listeners are used to implement <>. 
+ [float] === Settings in other index modules diff --git a/docs/reference/migration/migrate_5_0/docs.asciidoc b/docs/reference/migration/migrate_5_0/docs.asciidoc index 85e4e901e5c..9149eed6142 100644 --- a/docs/reference/migration/migrate_5_0/docs.asciidoc +++ b/docs/reference/migration/migrate_5_0/docs.asciidoc @@ -1,6 +1,16 @@ [[breaking_50_document_api_changes]] === Document API changes +==== `?refresh` no longer supports truthy and falsy values +The `?refresh` request parameter used to accept any value other than `false`, +`0`, `off`, and `no` to mean "make the changes from this request visible for +search immediately." Now it only accepts `?refresh` and `?refresh=true` to +mean that. You can set it to `?refresh=false` and the request will take no +refresh-related action. The same is true if you leave `refresh` off of the +url entirely. If you add `?refresh=wait_for` Elasticsearch will wait for the +changes to become visible before replying to the request but won't take any +immediate refresh related action. See <>. + ==== Reindex and Update By Query Before 5.0.0 `_reindex` and `_update_by_query` only retried bulk failures so they used the following response format: diff --git a/docs/reference/migration/migrate_5_0/java.asciidoc b/docs/reference/migration/migrate_5_0/java.asciidoc index da97d360b43..a16f2a9ff9b 100644 --- a/docs/reference/migration/migrate_5_0/java.asciidoc +++ b/docs/reference/migration/migrate_5_0/java.asciidoc @@ -304,3 +304,8 @@ The `setQuery(BytesReference)` method have been removed in favor of using `setQu Removed the `getMemoryAvailable` method from `OsStats`, which could be previously accessed calling `clusterStatsResponse.getNodesStats().getOs().getMemoryAvailable()`. + +=== setRefresh(boolean) has been deprecated + +`setRefresh(boolean)` has been deprecated in favor of `setRefreshPolicy(RefreshPolicy)` because there +are now three options. It will be removed in 5.0. 
diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BulkTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BulkTests.java index dd9daa75e76..13868566eac 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BulkTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BulkTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.action.update.UpdateResponse; @@ -584,7 +585,7 @@ public class BulkTests extends ESIntegTestCase { .add(new IndexRequest("test", "type", "4").source("{ \"title\" : \"Great Title of doc 4\" }")) .add(new IndexRequest("test", "type", "5").source("{ \"title\" : \"Great Title of doc 5\" }")) .add(new IndexRequest("test", "type", "6").source("{ \"title\" : \"Great Title of doc 6\" }")) - .setRefresh(true) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); assertNoFailures(indexBulkItemResponse); @@ -622,7 +623,7 @@ public class BulkTests extends ESIntegTestCase { .add(new IndexRequest("bulkindex2", "index2_type").source("text", "hallo2")) .add(new UpdateRequest("bulkindex2", "index2_type", "2").doc("foo", "bar")) .add(new DeleteRequest("bulkindex2", "index2_type", "3")) - .refresh(true); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE); client().bulk(bulkRequest).get(); SearchResponse searchResponse = client().prepareSearch("bulkindex*").get(); @@ -643,10 +644,10 @@ public class BulkTests extends ESIntegTestCase { client().prepareIndex("bulkindex1", "index1_type", "1").setSource("text", "test").get(); assertAcked(client().admin().indices().prepareClose("bulkindex1")); - 
BulkRequest bulkRequest = new BulkRequest(); + BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(RefreshPolicy.IMMEDIATE); bulkRequest.add(new IndexRequest("bulkindex1", "index1_type", "1").source("text", "hallo1")) .add(new UpdateRequest("bulkindex1", "index1_type", "1").doc("foo", "bar")) - .add(new DeleteRequest("bulkindex1", "index1_type", "1")).refresh(true); + .add(new DeleteRequest("bulkindex1", "index1_type", "1")); BulkResponse bulkResponse = client().bulk(bulkRequest).get(); assertThat(bulkResponse.hasFailures(), is(true)); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java index ed9b65130a9..20c8c7f8b58 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.client.Requests; import org.elasticsearch.common.geo.builders.ShapeBuilders; import org.elasticsearch.common.lucene.search.function.CombineFunction; @@ -294,8 +295,8 @@ public class PercolatorIT extends ESIntegTestCase { .field("color", "blue") .field("query", termQuery("field1", "value1")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); cluster().wipeIndices("test"); createIndex("test"); @@ -308,8 +309,8 @@ public class PercolatorIT extends ESIntegTestCase { .field("color", "blue") .field("query", termQuery("field1", "value1")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + 
.setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); } // see #2814 @@ -338,8 +339,8 @@ public class PercolatorIT extends ESIntegTestCase { .field("source", "productizer") .field("query", QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("filingcategory:s"))) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); refresh(); PercolateResponse percolate = preparePercolate(client()) @@ -417,8 +418,8 @@ public class PercolatorIT extends ESIntegTestCase { .field("color", "blue") .field("query", termQuery("field1", "value1")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); logger.info("--> register a query 2"); client().prepareIndex(INDEX_NAME, TYPE_NAME, "bubu") @@ -426,8 +427,8 @@ public class PercolatorIT extends ESIntegTestCase { .field("color", "green") .field("query", termQuery("field1", "value2")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); PercolateResponse percolate = preparePercolate(client()) .setIndices(INDEX_NAME).setDocumentType("type1") @@ -461,8 +462,8 @@ public class PercolatorIT extends ESIntegTestCase { .field("color", "blue") .field("query", termQuery("field1", "value1")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); PercolateResponse percolate = preparePercolate(client()) .setIndices(INDEX_NAME).setDocumentType("type1") @@ -478,8 +479,8 @@ public class PercolatorIT extends ESIntegTestCase { .field("color", "green") .field("query", termQuery("field1", "value2")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); percolate = preparePercolate(client()) .setIndices(INDEX_NAME).setDocumentType("type1") @@ -495,8 +496,8 @@ public class PercolatorIT extends ESIntegTestCase { .field("color", "red") .field("query", 
termQuery("field1", "value2")) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); PercolateSourceBuilder sourceBuilder = new PercolateSourceBuilder() .setDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value2").endObject())) @@ -510,7 +511,7 @@ public class PercolatorIT extends ESIntegTestCase { assertThat(convertFromTextArray(percolate.getMatches(), INDEX_NAME), arrayContaining("susu")); logger.info("--> deleting query 1"); - client().prepareDelete(INDEX_NAME, TYPE_NAME, "kuku").setRefresh(true).execute().actionGet(); + client().prepareDelete(INDEX_NAME, TYPE_NAME, "kuku").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); percolate = preparePercolate(client()) .setIndices(INDEX_NAME).setDocumentType("type1") @@ -1461,8 +1462,8 @@ public class PercolatorIT extends ESIntegTestCase { .must(QueryBuilders.queryStringQuery("root")) .must(QueryBuilders.termQuery("message", "tree")))) .endObject()) - .setRefresh(true) - .execute().actionGet(); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + .get(); refresh(); PercolateResponse percolate = preparePercolate(client()) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json index 590054b04a4..a75daf35204 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json @@ -22,8 +22,9 @@ "description" : "Explicit write consistency setting for the operation" }, "refresh": { - "type" : "boolean", - "description" : "Refresh the index after performing the operation" + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` then refresh the effected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes." 
}, "routing": { "type" : "string", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json index be09c0179d4..5bb0e3fed4c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json @@ -33,8 +33,9 @@ "description" : "ID of parent document" }, "refresh": { - "type" : "boolean", - "description" : "Refresh the index after performing the operation" + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes." }, "routing": { "type" : "string", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index 5c13f67c212..b7f7eeb9ef5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -38,8 +38,9 @@ "description" : "ID of the parent document" }, "refresh": { - "type" : "boolean", - "description" : "Refresh the index after performing the operation" + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes." 
}, "routing": { "type" : "string", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index 20fc3524283..4a3f134301d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -41,8 +41,9 @@ "description": "ID of the parent document. Is is only used for routing and when for the upsert request" }, "refresh": { - "type": "boolean", - "description": "Refresh the index after performing the operation" + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes." }, "retry_on_conflict": { "type": "number", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yaml new file mode 100644 index 00000000000..4906975bfab --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yaml @@ -0,0 +1,48 @@ +--- +"refresh=true immediately makes changes visible in search": + - do: + bulk: + refresh: true + body: | + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} + {"f1": "v1", "f2": 42} + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}} + {"f1": "v2", "f2": 47} + + - do: + count: + index: test_index + - match: {count: 2} + +--- +"refresh=empty string immediately makes changes visible in search": + - do: + bulk: + refresh: "" + body: | + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} + {"f1": "v1", "f2": 42} + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}} + {"f1": "v2", "f2": 47} + + - do: + count: + index: test_index + 
- match: {count: 2} + + +--- +"refresh=wait_for waits until changes are visible in search": + - do: + bulk: + refresh: wait_for + body: | + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} + {"f1": "v1", "f2": 42} + {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}} + {"f1": "v2", "f2": 47} + + - do: + count: + index: test_index + - match: {count: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yaml index 99bfbc3cff6..90dc28bcfc0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yaml @@ -33,8 +33,9 @@ index: test_1 type: test id: 2 - refresh: 1 + refresh: true body: { foo: bar } + - is_true: forced_refresh - do: search: @@ -44,3 +45,42 @@ query: { term: { _id: 2 }} - match: { hits.total: 1 } + +--- +"When refresh url parameter is an empty string that means \"refresh immediately\"": + - do: + create: + index: test_1 + type: test + id: 1 + refresh: "" + body: { foo: bar } + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + + - match: { hits.total: 1 } + +--- +"refresh=wait_for waits until changes are visible in search": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + refresh: wait_for + - is_false: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml index 4d3f9fe039d..9ea6bc033de 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yaml @@ -19,7 +19,7 
@@ type: test id: 1 body: { foo: bar } - refresh: 1 + refresh: true # If you wonder why this document get 3 as an id instead of 2, it is because the # current routing algorithm would route 1 and 2 to the same shard while we need @@ -30,7 +30,8 @@ type: test id: 3 body: { foo: bar } - refresh: 1 + refresh: true + - is_true: forced_refresh - do: search: @@ -61,7 +62,7 @@ index: test_1 type: test id: 3 - refresh: 1 + refresh: true # If a replica shard where doc 1 is located gets initialized at this point, doc 1 # won't be found by the following search as the shard gets automatically refreshed @@ -75,3 +76,72 @@ query: { terms: { _id: [1,3] }} - match: { hits.total: 1 } + +--- +"When refresh url parameter is an empty string that means \"refresh immediately\"": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + refresh: true + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } + + - do: + delete: + index: test_1 + type: test + id: 1 + refresh: "" + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 0 } + +--- +"refresh=wait_for waits until changes are visible in search": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + refresh: true + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } + + - do: + delete: + index: test_1 + type: test + id: 1 + refresh: wait_for + - is_false: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yaml index af6ea59766f..4ee26411432 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yaml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yaml @@ -33,8 +33,9 @@ index: test_1 type: test id: 2 - refresh: 1 + refresh: true body: { foo: bar } + - is_true: forced_refresh - do: search: @@ -44,3 +45,42 @@ query: { term: { _id: 2 }} - match: { hits.total: 1 } + +--- +"When refresh url parameter is an empty string that means \"refresh immediately\"": + - do: + index: + index: test_1 + type: test + id: 1 + refresh: "" + body: { foo: bar } + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + + - match: { hits.total: 1 } + +--- +"refresh=wait_for waits until changes are visible in search": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + refresh: wait_for + - is_false: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml index 6048292ceab..8c0e7e66c97 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yaml @@ -35,10 +35,11 @@ index: test_1 type: test id: 2 - refresh: 1 + refresh: true body: doc: { foo: baz } upsert: { foo: bar } + - is_true: forced_refresh - do: search: @@ -48,3 +49,70 @@ query: { term: { _id: 2 }} - match: { hits.total: 1 } + +--- +"When refresh url parameter is an empty string that means \"refresh immediately\"": + - do: + index: + index: test_1 + type: test + id: 1 + refresh: true + body: { foo: bar } + - is_true: forced_refresh + + - do: + update: + index: test_1 + type: test + id: 1 + refresh: "" + body: + doc: {cat: dog} + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { cat: dog }} + + - match: { hits.total: 1 } + +--- 
+"refresh=wait_for waits until changes are visible in search": + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + refresh: true + - is_true: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { term: { _id: 1 }} + - match: { hits.total: 1 } + + - do: + update: + index: test_1 + type: test + id: 1 + refresh: wait_for + body: + doc: { test: asdf } + - is_false: forced_refresh + + - do: + search: + index: test_1 + type: test + body: + query: { match: { test: asdf } } + - match: { hits.total: 1 } From d594d6c07cb33635cf72edc84efaa7e4f4ee31c6 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 6 Jun 2016 18:14:05 +0200 Subject: [PATCH 24/39] [TEST] ensure green before we filter allocations otherwise follow up ensureGreen() will fail --- .../action/admin/indices/create/CreateIndexIT.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 31c5749697b..60c4d0f486e 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -300,6 +300,10 @@ public class CreateIndexIT extends ESIntegTestCase { assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); String mergeNode = discoveryNodes[0].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); // relocate all shards to one node such that we can merge it. 
client().admin().indices().prepareUpdateSettings("source") .setSettings(Settings.builder() @@ -343,6 +347,10 @@ public class CreateIndexIT extends ESIntegTestCase { DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); String spareNode = discoveryNodes[0].getName(); String mergeNode = discoveryNodes[1].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); // relocate all shards to one node such that we can merge it. client().admin().indices().prepareUpdateSettings("source") .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode) From 260b0fd40f117b65902a63d838d00a63cdc4d50e Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 6 Jun 2016 10:49:40 -0700 Subject: [PATCH 25/39] Stubbed foreach node. 
--- .../elasticsearch/painless/antlr/Walker.java | 12 +++++ .../elasticsearch/painless/node/SEach.java | 51 +++++++++++++++++++ .../painless/node/package-info.java | 3 +- 3 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 0f155ceb4c1..3e315bbcb24 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -53,6 +53,7 @@ import org.elasticsearch.painless.antlr.PainlessParser.DeclvarContext; import org.elasticsearch.painless.antlr.PainlessParser.DelimiterContext; import org.elasticsearch.painless.antlr.PainlessParser.DoContext; import org.elasticsearch.painless.antlr.PainlessParser.DynamicContext; +import org.elasticsearch.painless.antlr.PainlessParser.EachContext; import org.elasticsearch.painless.antlr.PainlessParser.EmptyContext; import org.elasticsearch.painless.antlr.PainlessParser.ExprContext; import org.elasticsearch.painless.antlr.PainlessParser.ExpressionContext; @@ -118,6 +119,7 @@ import org.elasticsearch.painless.node.SContinue; import org.elasticsearch.painless.node.SDeclBlock; import org.elasticsearch.painless.node.SDeclaration; import org.elasticsearch.painless.node.SDo; +import org.elasticsearch.painless.node.SEach; import org.elasticsearch.painless.node.SExpression; import org.elasticsearch.painless.node.SFor; import org.elasticsearch.painless.node.SIf; @@ -271,6 +273,16 @@ public final class Walker extends PainlessParserBaseVisitor { } } + @Override + public Object visitEach(EachContext ctx) { + String type = ctx.decltype().getText(); + String name = ctx.ID().getText(); + AExpression expression = (AExpression)visit(ctx.expression()); + SBlock block = 
(SBlock)visit(ctx.trailer()); + + return new SEach(location(ctx), type, name, expression, block); + } + @Override public Object visitDecl(DeclContext ctx) { return visit(ctx.declaration()); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java new file mode 100644 index 00000000000..aacde9b48b1 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless.node; + +import org.elasticsearch.painless.Location; +import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.Variables; + +public class SEach extends AStatement { + final String type; + final String name; + AExpression expression; + AStatement block; + + public SEach(final Location location, final String type, final String name, final AExpression expression, final SBlock block) { + super(location); + + this.type = type; + this.name = name; + this.expression = expression; + this.block = block; + } + + + @Override + AStatement analyze(Variables variables) { + return null; + } + + @Override + void write(MethodWriter writer) { + + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java index 13ef13bdd77..9e693510359 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java @@ -90,7 +90,8 @@ *

    * Generally, statement nodes have member data that evaluate legal control-flow during the analysis phase. * The typical order for statement nodes is for each node to call analyze on it's children during the analysis phase - * and write on it's children during the writing phase. No modifications are made to the structure of statement nodes. + * and write on it's children during the writing phase. Upon analysis completion, a statement will return either + * itself or another statement node depending on if a shortcut or def type was found. *

    * Generally, expression nodes have member data that evaluate static types. The typical order for an expression node * during the analysis phase looks like the following: From b2e648dfdefc413ecf6f05d5a32dd59eb9ef32ad Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 6 Jun 2016 12:33:25 -0700 Subject: [PATCH 26/39] Implementation of iterable. --- .../org/elasticsearch/painless/Variables.java | 6 +- .../elasticsearch/painless/antlr/Walker.java | 12 +- .../elasticsearch/painless/node/LBrace.java | 2 +- .../elasticsearch/painless/node/SEach.java | 121 +++++++++++++++++- .../org/elasticsearch/painless/node/SFor.java | 2 +- .../painless/BasicStatementTests.java | 5 + 6 files changed, 136 insertions(+), 12 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Variables.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Variables.java index 16130476c38..4905011520a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Variables.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Variables.java @@ -138,15 +138,15 @@ public final class Variables { public void decrementScope() { int remove = scopes.pop(); - + while (remove > 0) { Variable variable = variables.pop(); - + // TODO: is this working? the code reads backwards... 
if (variable.read) { throw variable.location.createError(new IllegalArgumentException("Variable [" + variable.name + "] never used.")); } - + --remove; } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 3e315bbcb24..a309fccdd27 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -263,11 +263,9 @@ public final class Walker extends PainlessParserBaseVisitor { if (ctx.trailer() != null) { SBlock block = (SBlock)visit(ctx.trailer()); - return new SFor(location(ctx), - settings.getMaxLoopCounter(), initializer, expression, afterthought, block); + return new SFor(location(ctx), settings.getMaxLoopCounter(), initializer, expression, afterthought, block); } else if (ctx.empty() != null) { - return new SFor(location(ctx), - settings.getMaxLoopCounter(), initializer, expression, afterthought, null); + return new SFor(location(ctx), settings.getMaxLoopCounter(), initializer, expression, afterthought, null); } else { throw location(ctx).createError(new IllegalStateException("Illegal tree structure.")); } @@ -275,12 +273,16 @@ public final class Walker extends PainlessParserBaseVisitor { @Override public Object visitEach(EachContext ctx) { + if (settings.getMaxLoopCounter() > 0) { + reserved.usesLoop(); + } + String type = ctx.decltype().getText(); String name = ctx.ID().getText(); AExpression expression = (AExpression)visit(ctx.expression()); SBlock block = (SBlock)visit(ctx.trailer()); - return new SEach(location(ctx), type, name, expression, block); + return new SEach(location(ctx), settings.getMaxLoopCounter(), type, name, expression, block); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LBrace.java 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LBrace.java index 4a13e03a490..b0816540c5e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LBrace.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/LBrace.java @@ -47,7 +47,7 @@ public final class LBrace extends ALink { throw createError(new IllegalArgumentException("Illegal array access made without target.")); } - final Sort sort = before.sort; + Sort sort = before.sort; if (sort == Sort.ARRAY) { index.expected = Definition.INT_TYPE; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java index aacde9b48b1..8d4052326fc 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java @@ -19,19 +19,40 @@ package org.elasticsearch.painless.node; +import org.elasticsearch.painless.AnalyzerCaster; +import org.elasticsearch.painless.Definition; +import org.elasticsearch.painless.Definition.Cast; +import org.elasticsearch.painless.Definition.Method; +import org.elasticsearch.painless.Definition.MethodKey; +import org.elasticsearch.painless.Definition.Sort; +import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Variables.Variable; +import org.objectweb.asm.Label; +import org.objectweb.asm.Opcodes; public class SEach extends AStatement { + + final int maxLoopCounter; final String type; final String name; AExpression expression; AStatement block; - public SEach(final Location location, final String type, final String name, final AExpression expression, final SBlock block) { + Variable variable = null; + Variable iterator = null; + Method method = null; 
+ Method hasNext = null; + Method next = null; + Cast cast = null; + + public SEach(final Location location, final int maxLoopCounter, + final String type, final String name, final AExpression expression, final SBlock block) { super(location); + this.maxLoopCounter = maxLoopCounter; this.type = type; this.name = name; this.expression = expression; @@ -41,11 +62,107 @@ public class SEach extends AStatement { @Override AStatement analyze(Variables variables) { - return null; + expression.analyze(variables); + expression.expected = expression.actual; + expression = expression.cast(variables); + + Sort sort = expression.actual.sort; + + if (sort == Sort.ARRAY) { + throw location.createError(new UnsupportedOperationException("Cannot execute for each against array type.")); + } else if (sort == Sort.DEF) { + throw location.createError(new UnsupportedOperationException("Cannot execute for each against def type.")); + } else if (Iterable.class.isAssignableFrom(expression.actual.clazz)) { + final Type type; + + try { + type = Definition.getType(this.type); + } catch (IllegalArgumentException exception) { + throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); + } + + variables.incrementScope(); + + Type itr = Definition.getType("Iterator"); + + variable = variables.addVariable(location, type, name, false, false); + iterator = variables.addVariable(location, itr, "#itr" + location.getOffset(), true, false); + + method = expression.actual.struct.methods.get(new MethodKey("iterator", 0)); + + if (method == null) { + throw location.createError(new IllegalArgumentException( + "Unable to create iterator for the type [" + expression.actual.name + "].")); + } + + hasNext = itr.struct.methods.get(new MethodKey("hasNext", 0)); + + if (hasNext == null) { + throw location.createError(new IllegalArgumentException("Method [hasNext] does not exist for type [Iterator].")); + } else if (hasNext.rtn.sort != Sort.BOOL) { + throw location.createError(new 
IllegalArgumentException("Method [hasNext] does not return type [boolean].")); + } + + next = itr.struct.methods.get(new MethodKey("next", 0)); + + if (next == null) { + throw location.createError(new IllegalArgumentException("Method [next] does not exist for type [Iterator].")); + } else if (next.rtn.sort != Sort.OBJECT) { + throw location.createError(new IllegalArgumentException("Method [next] does not return type [Object].")); + } + + cast = AnalyzerCaster.getLegalCast(location, Definition.getType("Object"), type, true, true); + + if (block == null) { + throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); + } + + block = block.analyze(variables); + block.statementCount = Math.max(1, block.statementCount); + + if (block.loopEscape && !block.anyContinue) { + throw createError(new IllegalArgumentException("Extraneous for loop.")); + } + + statementCount = 1; + + if (maxLoopCounter > 0) { + loopCounterSlot = variables.getVariable(location, "#loop").slot; + } + + variables.decrementScope(); + + return this; + } else { + throw location.createError(new IllegalArgumentException("Illegal for each type [" + expression.actual.name + "].")); + } } @Override void write(MethodWriter writer) { + expression.write(writer); + if (java.lang.reflect.Modifier.isInterface(method.owner.clazz.getModifiers())) { + writer.invokeInterface(method.owner.type, method.method); + } else { + writer.invokeVirtual(method.owner.type, method.method); + } + + writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ISTORE), iterator.slot); + + Label end = new Label(); + + writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ILOAD), iterator.slot); + writer.invokeInterface(hasNext.owner.type, hasNext.method); + writer.ifZCmp(MethodWriter.EQ, end); + + writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ILOAD), iterator.slot); + writer.invokeInterface(next.owner.type, next.method); + writer.writeCast(cast); + 
writer.visitVarInsn(variable.type.type.getOpcode(Opcodes.ISTORE), variable.slot); + + block.write(writer); + + writer.mark(end); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java index 75909dff81c..6085c1ce8d9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java @@ -40,11 +40,11 @@ public final class SFor extends AStatement { ANode initializer, AExpression condition, AExpression afterthought, SBlock block) { super(location); + this.maxLoopCounter = maxLoopCounter; this.initializer = initializer; this.condition = condition; this.afterthought = afterthought; this.block = block; - this.maxLoopCounter = maxLoopCounter; } @Override diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java index 0d6a54b515b..1fffcf1ee29 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java @@ -125,6 +125,11 @@ public class BasicStatementTests extends ScriptTestCase { } } + public void testEachStatement() { + assertEquals(6, exec("List l = new ArrayList(); l.add(1); l.add(2); l.add(3); int total = 0;" + + " for (int x : l) total += x; return x")); + } + public void testDeclarationStatement() { assertEquals((byte)2, exec("byte a = 2; return a;")); assertEquals((short)2, exec("short a = 2; return a;")); From a4ffaa6e7a5660071dbd84d0de641321e7124b3a Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 6 Jun 2016 13:53:31 -0700 Subject: [PATCH 27/39] Working iterable foreach with tests. 
--- .../src/main/antlr/PainlessParser.g4 | 2 +- .../org/elasticsearch/painless/antlr/Walker.java | 2 +- .../org/elasticsearch/painless/node/SEach.java | 15 +++++++++++---- .../painless/BasicStatementTests.java | 9 +++++++-- 4 files changed, 20 insertions(+), 8 deletions(-) diff --git a/modules/lang-painless/src/main/antlr/PainlessParser.g4 b/modules/lang-painless/src/main/antlr/PainlessParser.g4 index dad8db62117..6a5ea6bf7fe 100644 --- a/modules/lang-painless/src/main/antlr/PainlessParser.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessParser.g4 @@ -74,7 +74,7 @@ decltype ; funcref - : TYPE REF ID + : ( TYPE | ID ) REF ID ; declvar diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index a309fccdd27..4ac090a4228 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -279,7 +279,7 @@ public final class Walker extends PainlessParserBaseVisitor { String type = ctx.decltype().getText(); String name = ctx.ID().getText(); - AExpression expression = (AExpression)visit(ctx.expression()); + AExpression expression = (AExpression)visitExpression(ctx.expression()); SBlock block = (SBlock)visit(ctx.trailer()); return new SEach(location(ctx), settings.getMaxLoopCounter(), type, name, expression, block); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java index 8d4052326fc..d7942583387 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java @@ -33,6 +33,9 @@ import org.elasticsearch.painless.Variables.Variable; import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; +import 
java.util.HashMap; +import java.util.Map; + public class SEach extends AStatement { final int maxLoopCounter; @@ -85,7 +88,7 @@ public class SEach extends AStatement { Type itr = Definition.getType("Iterator"); - variable = variables.addVariable(location, type, name, false, false); + variable = variables.addVariable(location, type, name, true, false); iterator = variables.addVariable(location, itr, "#itr" + location.getOffset(), true, false); method = expression.actual.struct.methods.get(new MethodKey("iterator", 0)); @@ -107,11 +110,11 @@ public class SEach extends AStatement { if (next == null) { throw location.createError(new IllegalArgumentException("Method [next] does not exist for type [Iterator].")); - } else if (next.rtn.sort != Sort.OBJECT) { - throw location.createError(new IllegalArgumentException("Method [next] does not return type [Object].")); + } else if (next.rtn.sort != Sort.DEF) { + throw location.createError(new IllegalArgumentException("Method [next] does not return type [def].")); } - cast = AnalyzerCaster.getLegalCast(location, Definition.getType("Object"), type, true, true); + cast = AnalyzerCaster.getLegalCast(location, Definition.DEF_TYPE, type, true, true); if (block == null) { throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); @@ -150,8 +153,11 @@ public class SEach extends AStatement { writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ISTORE), iterator.slot); + Label begin = new Label(); Label end = new Label(); + writer.mark(begin); + writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ILOAD), iterator.slot); writer.invokeInterface(hasNext.owner.type, hasNext.method); writer.ifZCmp(MethodWriter.EQ, end); @@ -163,6 +169,7 @@ public class SEach extends AStatement { block.write(writer); + writer.goTo(begin); writer.mark(end); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java index 1fffcf1ee29..7a2846433a6 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java @@ -125,9 +125,14 @@ public class BasicStatementTests extends ScriptTestCase { } } - public void testEachStatement() { + public void testIterableForEachStatement() { assertEquals(6, exec("List l = new ArrayList(); l.add(1); l.add(2); l.add(3); int total = 0;" + - " for (int x : l) total += x; return x")); + " for (int x : l) total += x; return total")); + assertEquals("123", exec("List l = new ArrayList(); l.add('1'); l.add('2'); l.add('3'); String cat = '';" + + " for (String x : l) cat += x; return cat")); + assertEquals("1236", exec("Map m = new HashMap(); m.put('1', 1); m.put('2', 2); m.put('3', 3);" + + " String cat = ''; int total = 0;" + + " for (Map.Entry e : m.entrySet()) { cat += e.getKey(); total += e.getValue(); } return cat + total")); } public void testDeclarationStatement() { From 080946c91551cc3008db41a8fed5a780becf78b3 Mon Sep 17 00:00:00 2001 From: Jared Carey Date: Mon, 6 Jun 2016 15:00:13 -0600 Subject: [PATCH 28/39] Fix typo in template validation message This commit addresses a typo in a template validation message in MetaDataIndexTemplateService.java. 
Relates #18754 --- .../cluster/metadata/MetaDataIndexTemplateService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 167dea5cf42..8f59ea3a527 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -256,7 +256,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent { validationErrors.add("template must not start with '_'"); } if (!Strings.validFileNameExcludingAstrix(request.template)) { - validationErrors.add("template must not container the following characters " + Strings.INVALID_FILENAME_CHARS); + validationErrors.add("template must not contain the following characters " + Strings.INVALID_FILENAME_CHARS); } List indexSettingsValidation = metaDataCreateIndexService.getIndexSettingsValidationErrors(request.settings); From 9a78f6955bd2d40306830ee211226f123c9c2d85 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 6 Jun 2016 15:25:09 -0700 Subject: [PATCH 29/39] Added foreach for array types. 
--- .../painless/node/AStatement.java | 19 +++ .../painless/node/SArrayEach.java | 130 ++++++++++++++++++ .../elasticsearch/painless/node/SEach.java | 10 +- .../painless/BasicStatementTests.java | 9 ++ 4 files changed, 163 insertions(+), 5 deletions(-) create mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java index 7b5d4a2b4dc..f6b1048028c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java @@ -124,4 +124,23 @@ public abstract class AStatement extends ANode { * Writes ASM based on the data collected during the analysis phase. */ abstract void write(MethodWriter writer); + + /** + * Used to copy statement data from one to another during analysis in the case of replacement. + */ + final AStatement copy(AStatement statement) { + lastSource = statement.lastSource; + beginLoop = statement.beginLoop; + inLoop = statement.inLoop; + lastLoop = statement.lastLoop; + methodEscape = statement.methodEscape; + loopEscape = statement.loopEscape; + allEscape = statement.allEscape; + anyContinue = statement.anyContinue; + anyBreak = statement.anyBreak; + loopCounterSlot = statement.loopCounterSlot; + statementCount = statement.statementCount; + + return this; + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java new file mode 100644 index 00000000000..1c3f90ed93e --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.node; + +import org.elasticsearch.painless.AnalyzerCaster; +import org.elasticsearch.painless.Definition; +import org.elasticsearch.painless.Definition.Cast; +import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.Location; +import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.Variables; +import org.elasticsearch.painless.Variables.Variable; +import org.objectweb.asm.Label; +import org.objectweb.asm.Opcodes; + +class SArrayEach extends AStatement { + final int maxLoopCounter; + final String type; + final String name; + AExpression expression; + AStatement block; + + Variable variable = null; + Variable array = null; + Variable index = null; + Type indexed = null; + Cast cast = null; + + SArrayEach(final Location location, final int maxLoopCounter, + final String type, final String name, final AExpression expression, final SBlock block) { + super(location); + + this.maxLoopCounter = maxLoopCounter; + this.type = type; + this.name = name; + this.expression = expression; + this.block = block; + } + + @Override + AStatement analyze(Variables variables) { + final Type type; + + try { + type = Definition.getType(this.type); + } catch (IllegalArgumentException exception) { 
+ throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); + } + + variables.incrementScope(); + + variable = variables.addVariable(location, type, name, true, false); + array = variables.addVariable(location, expression.actual, "#array" + location.getOffset(), true, false); + index = variables.addVariable(location, Definition.INT_TYPE, "#index" + location.getOffset(), true, false); + indexed = Definition.getType(expression.actual.struct, expression.actual.dimensions - 1); + cast = AnalyzerCaster.getLegalCast(location, indexed, type, true, true); + + if (block == null) { + throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); + } + + block.beginLoop = true; + block.inLoop = true; + block = block.analyze(variables); + block.statementCount = Math.max(1, block.statementCount); + + if (block.loopEscape && !block.anyContinue) { + throw createError(new IllegalArgumentException("Extraneous for loop.")); + } + + statementCount = 1; + + if (maxLoopCounter > 0) { + loopCounterSlot = variables.getVariable(location, "#loop").slot; + } + + variables.decrementScope(); + + return this; + } + + @Override + void write(MethodWriter writer) { + writer.writeStatementOffset(location); + + expression.write(writer); + writer.visitVarInsn(array.type.type.getOpcode(Opcodes.ISTORE), array.slot); + writer.push(-1); + writer.visitVarInsn(index.type.type.getOpcode(Opcodes.ISTORE), index.slot); + + Label begin = new Label(); + Label end = new Label(); + + writer.mark(begin); + + writer.visitIincInsn(index.slot, 1); + writer.visitVarInsn(index.type.type.getOpcode(Opcodes.ILOAD), index.slot); + writer.visitVarInsn(array.type.type.getOpcode(Opcodes.ILOAD), array.slot); + writer.arrayLength(); + writer.ifICmp(MethodWriter.GE, end); + + writer.visitVarInsn(array.type.type.getOpcode(Opcodes.ILOAD), array.slot); + writer.visitVarInsn(index.type.type.getOpcode(Opcodes.ILOAD), index.slot); + writer.arrayLoad(indexed.type); + 
writer.writeCast(cast); + writer.visitVarInsn(variable.type.type.getOpcode(Opcodes.ISTORE), variable.slot); + + block.write(writer); + + writer.goTo(begin); + writer.mark(end); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java index d7942583387..5dd1ee0936d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java @@ -33,9 +33,6 @@ import org.elasticsearch.painless.Variables.Variable; import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; -import java.util.HashMap; -import java.util.Map; - public class SEach extends AStatement { final int maxLoopCounter; @@ -62,7 +59,6 @@ public class SEach extends AStatement { this.block = block; } - @Override AStatement analyze(Variables variables) { expression.analyze(variables); @@ -72,7 +68,7 @@ public class SEach extends AStatement { Sort sort = expression.actual.sort; if (sort == Sort.ARRAY) { - throw location.createError(new UnsupportedOperationException("Cannot execute for each against array type.")); + return new SArrayEach(location, maxLoopCounter, type, name, expression, (SBlock)block).copy(this).analyze(variables); } else if (sort == Sort.DEF) { throw location.createError(new UnsupportedOperationException("Cannot execute for each against def type.")); } else if (Iterable.class.isAssignableFrom(expression.actual.clazz)) { @@ -120,6 +116,8 @@ public class SEach extends AStatement { throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); } + block.beginLoop = true; + block.inLoop = true; block = block.analyze(variables); block.statementCount = Math.max(1, block.statementCount); @@ -143,6 +141,8 @@ public class SEach extends AStatement { @Override void write(MethodWriter writer) { + writer.writeStatementOffset(location); + 
expression.write(writer); if (java.lang.reflect.Modifier.isInterface(method.owner.clazz.getModifiers())) { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java index 7a2846433a6..6ddb4067426 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java @@ -135,6 +135,15 @@ public class BasicStatementTests extends ScriptTestCase { " for (Map.Entry e : m.entrySet()) { cat += e.getKey(); total += e.getValue(); } return cat + total")); } + public void testArrayForEachStatement() { + assertEquals(6, exec("int[] a = new int[3]; a[0] = 1; a[1] = 2; a[2] = 3; int total = 0;" + + " for (int x : a) total += x; return total")); + assertEquals("123", exec("String[] a = new String[3]; a[0] = '1'; a[1] = '2'; a[2] = '3'; def total = '';" + + " for (String x : a) total += x; return total")); + assertEquals(6, exec("int[][] i = new int[3][1]; i[0][0] = 1; i[1][0] = 2; i[2][0] = 3; int total = 0;" + + " for (int[] j : i) total += j[0]; return total")); + } + public void testDeclarationStatement() { assertEquals((byte)2, exec("byte a = 2; return a;")); assertEquals((short)2, exec("short a = 2; return a;")); From f89734229677fbaf83233e117eb81d5979152273 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 6 Jun 2016 15:34:51 -0700 Subject: [PATCH 30/39] Updated package info documentation. 
--- .../main/java/org/elasticsearch/painless/node/SArrayEach.java | 3 +++ .../src/main/java/org/elasticsearch/painless/node/SEach.java | 3 +++ .../java/org/elasticsearch/painless/node/package-info.java | 2 ++ 3 files changed, 8 insertions(+) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java index 1c3f90ed93e..9abb7a18a9a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java @@ -30,6 +30,9 @@ import org.elasticsearch.painless.Variables.Variable; import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; +/** + * Represents a for-each loop shortcut for arrays. (Internal only.) + */ class SArrayEach extends AStatement { final int maxLoopCounter; final String type; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java index 5dd1ee0936d..0d7e81c2073 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java @@ -33,6 +33,9 @@ import org.elasticsearch.painless.Variables.Variable; import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; +/** + * Represents a for-each loop shortcut for iterables. Defers to other S-nodes for non-iterable types. 
+ */ public class SEach extends AStatement { final int maxLoopCounter; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java index 9e693510359..04727507edc 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java @@ -63,6 +63,7 @@ * {@link org.elasticsearch.painless.node.LStatic} - Represents a static type target. * {@link org.elasticsearch.painless.node.LString} - Represents a string constant. * {@link org.elasticsearch.painless.node.LVariable} - Represents a variable load/store. + * {@link org.elasticsearch.painless.node.SArrayEach} - Represents a for each loop shortcut for arrays. (Internal only.) * {@link org.elasticsearch.painless.node.SBlock} - Represents a set of statements as a branch of control-flow. * {@link org.elasticsearch.painless.node.SBreak} - Represents a break statement. * {@link org.elasticsearch.painless.node.SCatch} - Represents a catch block as part of a try-catch block. @@ -70,6 +71,7 @@ * {@link org.elasticsearch.painless.node.SDeclaration} - Represents a single variable declaration. * {@link org.elasticsearch.painless.node.SDeclBlock} - Represents a series of declarations. * {@link org.elasticsearch.painless.node.SDo} - Represents a do-while loop. + * {@link org.elasticsearch.painless.node.SEach} - Represents a for each loop shortcut for iterables. * {@link org.elasticsearch.painless.node.SExpression} - Represents the top-level node for an expression as a statement. * {@link org.elasticsearch.painless.node.SFor} - Represents a for loop. * {@link org.elasticsearch.painless.node.SIf} - Represents an if block. From a088d367f7d22267cc16bbcb7146f26a27168bb1 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 6 Jun 2016 15:58:56 -0700 Subject: [PATCH 31/39] More comments. 
--- .../elasticsearch/painless/node/EChain.java | 12 +++++----- .../elasticsearch/painless/node/SDefEach.java | 23 +++++++++++++++++++ .../elasticsearch/painless/node/SEach.java | 3 +++ 3 files changed, 32 insertions(+), 6 deletions(-) create mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDefEach.java diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java index df0d670320d..9b2968db565 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EChain.java @@ -269,14 +269,14 @@ public final class EChain extends AExpression { */ @Override void write(MethodWriter writer) { + writer.writeDebugInfo(location); + // For the case where the chain represents a String concatenation - // we must first write debug information, and then depending on the - // Java version write a StringBuilder or track types going onto the - // stack. This must be done before the links in the chain are read - // because we need the StringBuilder to be placed on the stack - // ahead of any potential concatenation arguments. + // we must, depending on the Java version, write a StringBuilder or + // track types going onto the stack. This must be done before the + // links in the chain are read because we need the StringBuilder to + // be placed on the stack ahead of any potential concatenation arguments. 
if (cat) { - writer.writeDebugInfo(location); writer.writeNewStrings(); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDefEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDefEach.java new file mode 100644 index 00000000000..000fc42c7a0 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDefEach.java @@ -0,0 +1,23 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless.node; + +public class SDefEach { +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java index 0d7e81c2073..3266fdc9ba6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java @@ -88,6 +88,9 @@ public class SEach extends AStatement { Type itr = Definition.getType("Iterator"); variable = variables.addVariable(location, type, name, true, false); + + // We must store the iterator as a variable for securing a slot on the stack, and + // also add the location offset to make the name unique in case of nested for each loops. iterator = variables.addVariable(location, itr, "#itr" + location.getOffset(), true, false); method = expression.actual.struct.methods.get(new MethodKey("iterator", 0)); From aeaf39f36d512a2dc37d37916198c1101a14a9bd Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 6 Jun 2016 16:07:12 -0700 Subject: [PATCH 32/39] Added a for each stub node for a target type of def. 
--- .../painless/node/SArrayEach.java | 6 +- .../elasticsearch/painless/node/SDefEach.java | 134 +++++++++++++++++- .../elasticsearch/painless/node/SEach.java | 5 +- 3 files changed, 139 insertions(+), 6 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java index 9abb7a18a9a..032483f59f2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java @@ -46,8 +46,7 @@ class SArrayEach extends AStatement { Type indexed = null; Cast cast = null; - SArrayEach(final Location location, final int maxLoopCounter, - final String type, final String name, final AExpression expression, final SBlock block) { + public SArrayEach(Location location, int maxLoopCounter, String type, String name, AExpression expression, SBlock block) { super(location); this.maxLoopCounter = maxLoopCounter; @@ -59,6 +58,9 @@ class SArrayEach extends AStatement { @Override AStatement analyze(Variables variables) { + // Note that we do not need to analyze the expression as this must already be done + // in the parent to determine that the for each target type is an array. 
+ final Type type; try { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDefEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDefEach.java index 000fc42c7a0..ab823698d3e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDefEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDefEach.java @@ -19,5 +19,137 @@ package org.elasticsearch.painless.node; -public class SDefEach { +import org.elasticsearch.painless.Location; +import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.Variables; + +public class SDefEach extends AStatement { + + final int maxLoopCounter; + final String type; + final String name; + AExpression expression; + AStatement block; + + public SDefEach(Location location, int maxLoopCounter, String type, String name, AExpression expression, SBlock block) { + super(location); + + this.maxLoopCounter = maxLoopCounter; + this.type = type; + this.name = name; + this.expression = expression; + this.block = block; + } + + @Override + AStatement analyze(Variables variables) { + // Note that we do not need to analyze the expression as this must already be done + // in the parent to determine that the for each target type is def. + + throw location.createError(new UnsupportedOperationException("Cannot execute for each against def type.")); + + /* + try { + type = Definition.getType(this.type); + } catch (IllegalArgumentException exception) { + throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); + } + + variables.incrementScope(); + + Type itr = Definition.getType("Iterator"); + + variable = variables.addVariable(location, type, name, true, false); + + // We must store the iterator as a variable for securing a slot on the stack, and + // also add the location offset to make the name unique in case of nested for each loops. 
+ iterator = variables.addVariable(location, itr, "#itr" + location.getOffset(), true, false); + + method = expression.actual.struct.methods.get(new MethodKey("iterator", 0)); + + if (method == null) { + throw location.createError(new IllegalArgumentException( + "Unable to create iterator for the type [" + expression.actual.name + "].")); + } + + hasNext = itr.struct.methods.get(new MethodKey("hasNext", 0)); + + if (hasNext == null) { + throw location.createError(new IllegalArgumentException("Method [hasNext] does not exist for type [Iterator].")); + } else if (hasNext.rtn.sort != Sort.BOOL) { + throw location.createError(new IllegalArgumentException("Method [hasNext] does not return type [boolean].")); + } + + next = itr.struct.methods.get(new MethodKey("next", 0)); + + if (next == null) { + throw location.createError(new IllegalArgumentException("Method [next] does not exist for type [Iterator].")); + } else if (next.rtn.sort != Sort.DEF) { + throw location.createError(new IllegalArgumentException("Method [next] does not return type [def].")); + } + + cast = AnalyzerCaster.getLegalCast(location, Definition.DEF_TYPE, type, true, true); + + if (block == null) { + throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); + } + + block.beginLoop = true; + block.inLoop = true; + block = block.analyze(variables); + block.statementCount = Math.max(1, block.statementCount); + + if (block.loopEscape && !block.anyContinue) { + throw createError(new IllegalArgumentException("Extraneous for loop.")); + } + + statementCount = 1; + + if (maxLoopCounter > 0) { + loopCounterSlot = variables.getVariable(location, "#loop").slot; + } + + variables.decrementScope(); + + return this; + */ + } + + @Override + void write(MethodWriter writer) { + throw location.createError(new IllegalStateException("Illegal tree structure.")); + + /* + writer.writeStatementOffset(location); + + expression.write(writer); + + if 
(java.lang.reflect.Modifier.isInterface(method.owner.clazz.getModifiers())) { + writer.invokeInterface(method.owner.type, method.method); + } else { + writer.invokeVirtual(method.owner.type, method.method); + } + + writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ISTORE), iterator.slot); + + Label begin = new Label(); + Label end = new Label(); + + writer.mark(begin); + + writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ILOAD), iterator.slot); + writer.invokeInterface(hasNext.owner.type, hasNext.method); + writer.ifZCmp(MethodWriter.EQ, end); + + writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ILOAD), iterator.slot); + writer.invokeInterface(next.owner.type, next.method); + writer.writeCast(cast); + writer.visitVarInsn(variable.type.type.getOpcode(Opcodes.ISTORE), variable.slot); + + block.write(writer); + + writer.goTo(begin); + writer.mark(end); + */ + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java index 3266fdc9ba6..79395a4acc7 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java @@ -51,8 +51,7 @@ public class SEach extends AStatement { Method next = null; Cast cast = null; - public SEach(final Location location, final int maxLoopCounter, - final String type, final String name, final AExpression expression, final SBlock block) { + public SEach(Location location, int maxLoopCounter, String type, String name, AExpression expression, SBlock block) { super(location); this.maxLoopCounter = maxLoopCounter; @@ -73,7 +72,7 @@ public class SEach extends AStatement { if (sort == Sort.ARRAY) { return new SArrayEach(location, maxLoopCounter, type, name, expression, (SBlock)block).copy(this).analyze(variables); } else if (sort == Sort.DEF) { - throw location.createError(new 
UnsupportedOperationException("Cannot execute for each against def type.")); + return new SDefEach(location, maxLoopCounter, type, name, expression, (SBlock)block).copy(this).analyze(variables); } else if (Iterable.class.isAssignableFrom(expression.actual.clazz)) { final Type type; From d331f8768e31ffde0af5d29e1ab94ec216e8d55e Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 6 Jun 2016 19:41:37 -0400 Subject: [PATCH 33/39] implement dynamic case for iterables --- .../java/org/elasticsearch/painless/Def.java | 18 ++ .../elasticsearch/painless/DefBootstrap.java | 4 + .../elasticsearch/painless/node/SDefEach.java | 155 ------------------ .../elasticsearch/painless/node/SEach.java | 25 ++- .../painless/BasicStatementTests.java | 20 +++ 5 files changed, 59 insertions(+), 163 deletions(-) delete mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDefEach.java diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java index f27f0ab20a9..269386bf944 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java @@ -27,6 +27,7 @@ import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodHandles.Lookup; import java.lang.invoke.MethodType; import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.function.Function; @@ -103,6 +104,8 @@ public final class Def { private static final MethodHandle LIST_GET; /** pointer to List.set(int,Object) */ private static final MethodHandle LIST_SET; + /** pointer to Iterable.iterator() */ + private static final MethodHandle ITERATOR; /** factory for arraylength MethodHandle (intrinsic) from Java 9 */ private static final MethodHandle JAVA9_ARRAY_LENGTH_MH_FACTORY; @@ -114,6 +117,7 @@ public final class Def { MAP_PUT = 
lookup.findVirtual(Map.class , "put", MethodType.methodType(Object.class, Object.class, Object.class)); LIST_GET = lookup.findVirtual(List.class, "get", MethodType.methodType(Object.class, int.class)); LIST_SET = lookup.findVirtual(List.class, "set", MethodType.methodType(Object.class, int.class, Object.class)); + ITERATOR = lookup.findVirtual(Iterable.class, "iterator", MethodType.methodType(Iterator.class)); } catch (final ReflectiveOperationException roe) { throw new AssertionError(roe); } @@ -374,6 +378,20 @@ public final class Def { throw new IllegalArgumentException("Attempting to address a non-array type " + "[" + receiverClass.getCanonicalName() + "] as an array."); } + + /** + * Returns a method handle to do iteration (for enhanced for loop) + * @param receiverClass Class of the array to load the value from + * @return a MethodHandle that accepts the receiver as first argument, returns iterator + */ + static MethodHandle lookupIterator(Class receiverClass) { + if (Iterable.class.isAssignableFrom(receiverClass)) { + return ITERATOR; + } else { + // TODO: arrays + throw new IllegalArgumentException("Cannot iterate over [" + receiverClass.getCanonicalName() + "]"); + } + } // NOTE: Below methods are not cached, instead invoked directly because they are performant. // We also check for Long values first when possible since the type is more diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java index 40b9cc6cbe8..1d2ca64abe0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java @@ -56,6 +56,8 @@ public final class DefBootstrap { public static final int ARRAY_LOAD = 3; /** static bootstrap parameter indicating a dynamic array store, e.g. 
foo[bar] = baz */ public static final int ARRAY_STORE = 4; + /** static bootstrap parameter indicating a dynamic iteration, e.g. for (x : y) */ + public static final int ITERATOR = 5; /** * CallSite that implements the polymorphic inlining cache (PIC). @@ -103,6 +105,8 @@ public final class DefBootstrap { return Def.lookupArrayLoad(clazz); case ARRAY_STORE: return Def.lookupArrayStore(clazz); + case ITERATOR: + return Def.lookupIterator(clazz); default: throw new AssertionError(); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDefEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDefEach.java deleted file mode 100644 index ab823698d3e..00000000000 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDefEach.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.painless.node; - -import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.Variables; - -public class SDefEach extends AStatement { - - final int maxLoopCounter; - final String type; - final String name; - AExpression expression; - AStatement block; - - public SDefEach(Location location, int maxLoopCounter, String type, String name, AExpression expression, SBlock block) { - super(location); - - this.maxLoopCounter = maxLoopCounter; - this.type = type; - this.name = name; - this.expression = expression; - this.block = block; - } - - @Override - AStatement analyze(Variables variables) { - // Note that we do not need to analyze the expression as this must already be done - // in the parent to determine that the for each target type is def. - - throw location.createError(new UnsupportedOperationException("Cannot execute for each against def type.")); - - /* - try { - type = Definition.getType(this.type); - } catch (IllegalArgumentException exception) { - throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); - } - - variables.incrementScope(); - - Type itr = Definition.getType("Iterator"); - - variable = variables.addVariable(location, type, name, true, false); - - // We must store the iterator as a variable for securing a slot on the stack, and - // also add the location offset to make the name unique in case of nested for each loops. 
- iterator = variables.addVariable(location, itr, "#itr" + location.getOffset(), true, false); - - method = expression.actual.struct.methods.get(new MethodKey("iterator", 0)); - - if (method == null) { - throw location.createError(new IllegalArgumentException( - "Unable to create iterator for the type [" + expression.actual.name + "].")); - } - - hasNext = itr.struct.methods.get(new MethodKey("hasNext", 0)); - - if (hasNext == null) { - throw location.createError(new IllegalArgumentException("Method [hasNext] does not exist for type [Iterator].")); - } else if (hasNext.rtn.sort != Sort.BOOL) { - throw location.createError(new IllegalArgumentException("Method [hasNext] does not return type [boolean].")); - } - - next = itr.struct.methods.get(new MethodKey("next", 0)); - - if (next == null) { - throw location.createError(new IllegalArgumentException("Method [next] does not exist for type [Iterator].")); - } else if (next.rtn.sort != Sort.DEF) { - throw location.createError(new IllegalArgumentException("Method [next] does not return type [def].")); - } - - cast = AnalyzerCaster.getLegalCast(location, Definition.DEF_TYPE, type, true, true); - - if (block == null) { - throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); - } - - block.beginLoop = true; - block.inLoop = true; - block = block.analyze(variables); - block.statementCount = Math.max(1, block.statementCount); - - if (block.loopEscape && !block.anyContinue) { - throw createError(new IllegalArgumentException("Extraneous for loop.")); - } - - statementCount = 1; - - if (maxLoopCounter > 0) { - loopCounterSlot = variables.getVariable(location, "#loop").slot; - } - - variables.decrementScope(); - - return this; - */ - } - - @Override - void write(MethodWriter writer) { - throw location.createError(new IllegalStateException("Illegal tree structure.")); - - /* - writer.writeStatementOffset(location); - - expression.write(writer); - - if 
(java.lang.reflect.Modifier.isInterface(method.owner.clazz.getModifiers())) { - writer.invokeInterface(method.owner.type, method.method); - } else { - writer.invokeVirtual(method.owner.type, method.method); - } - - writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ISTORE), iterator.slot); - - Label begin = new Label(); - Label end = new Label(); - - writer.mark(begin); - - writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ILOAD), iterator.slot); - writer.invokeInterface(hasNext.owner.type, hasNext.method); - writer.ifZCmp(MethodWriter.EQ, end); - - writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ILOAD), iterator.slot); - writer.invokeInterface(next.owner.type, next.method); - writer.writeCast(cast); - writer.visitVarInsn(variable.type.type.getOpcode(Opcodes.ISTORE), variable.slot); - - block.write(writer); - - writer.goTo(begin); - writer.mark(end); - */ - } -} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java index 79395a4acc7..309c6d97967 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java @@ -20,6 +20,7 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.AnalyzerCaster; +import org.elasticsearch.painless.DefBootstrap; import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Definition.Method; @@ -33,6 +34,8 @@ import org.elasticsearch.painless.Variables.Variable; import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; +import static org.elasticsearch.painless.WriterConstants.DEF_BOOTSTRAP_HANDLE; + /** * Represents a for-each loop shortcut for iterables. Defers to other S-nodes for non-iterable types. 
*/ @@ -71,9 +74,7 @@ public class SEach extends AStatement { if (sort == Sort.ARRAY) { return new SArrayEach(location, maxLoopCounter, type, name, expression, (SBlock)block).copy(this).analyze(variables); - } else if (sort == Sort.DEF) { - return new SDefEach(location, maxLoopCounter, type, name, expression, (SBlock)block).copy(this).analyze(variables); - } else if (Iterable.class.isAssignableFrom(expression.actual.clazz)) { + } else if (sort == Sort.DEF || Iterable.class.isAssignableFrom(expression.actual.clazz)) { final Type type; try { @@ -92,11 +93,15 @@ public class SEach extends AStatement { // also add the location offset to make the name unique in case of nested for each loops. iterator = variables.addVariable(location, itr, "#itr" + location.getOffset(), true, false); - method = expression.actual.struct.methods.get(new MethodKey("iterator", 0)); + if (sort == Sort.DEF) { + method = null; + } else { + method = expression.actual.struct.methods.get(new MethodKey("iterator", 0)); - if (method == null) { - throw location.createError(new IllegalArgumentException( - "Unable to create iterator for the type [" + expression.actual.name + "].")); + if (method == null) { + throw location.createError(new IllegalArgumentException( + "Unable to create iterator for the type [" + expression.actual.name + "].")); + } } hasNext = itr.struct.methods.get(new MethodKey("hasNext", 0)); @@ -150,7 +155,11 @@ public class SEach extends AStatement { expression.write(writer); - if (java.lang.reflect.Modifier.isInterface(method.owner.clazz.getModifiers())) { + if (method == null) { + Type itr = Definition.getType("Iterator"); + String desc = org.objectweb.asm.Type.getMethodDescriptor(itr.type, Definition.DEF_TYPE.type); + writer.invokeDynamic("iterator", desc, DEF_BOOTSTRAP_HANDLE, (Object)DefBootstrap.ITERATOR); + } else if (java.lang.reflect.Modifier.isInterface(method.owner.clazz.getModifiers())) { writer.invokeInterface(method.owner.type, method.method); } else { 
writer.invokeVirtual(method.owner.type, method.method); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java index 6ddb4067426..64137bfbcb3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java @@ -134,6 +134,16 @@ public class BasicStatementTests extends ScriptTestCase { " String cat = ''; int total = 0;" + " for (Map.Entry e : m.entrySet()) { cat += e.getKey(); total += e.getValue(); } return cat + total")); } + + public void testIterableForEachStatementDef() { + assertEquals(6, exec("def l = new ArrayList(); l.add(1); l.add(2); l.add(3); int total = 0;" + + " for (int x : l) total += x; return total")); + assertEquals("123", exec("def l = new ArrayList(); l.add('1'); l.add('2'); l.add('3'); String cat = '';" + + " for (String x : l) cat += x; return cat")); + assertEquals("1236", exec("def m = new HashMap(); m.put('1', 1); m.put('2', 2); m.put('3', 3);" + + " String cat = ''; int total = 0;" + + " for (Map.Entry e : m.entrySet()) { cat += e.getKey(); total += e.getValue(); } return cat + total")); + } public void testArrayForEachStatement() { assertEquals(6, exec("int[] a = new int[3]; a[0] = 1; a[1] = 2; a[2] = 3; int total = 0;" + @@ -143,6 +153,16 @@ public class BasicStatementTests extends ScriptTestCase { assertEquals(6, exec("int[][] i = new int[3][1]; i[0][0] = 1; i[1][0] = 2; i[2][0] = 3; int total = 0;" + " for (int[] j : i) total += j[0]; return total")); } + + @AwaitsFix(bugUrl = "working on it") + public void testArrayForEachStatementDef() { + assertEquals(6, exec("def a = new int[3]; a[0] = 1; a[1] = 2; a[2] = 3; int total = 0;" + + " for (int x : a) total += x; return total")); + assertEquals("123", exec("def a = new String[3]; a[0] = '1'; a[1] = '2'; a[2] = '3'; def total = 
'';" + + " for (String x : a) total += x; return total")); + assertEquals(6, exec("def i = new int[3][1]; i[0][0] = 1; i[1][0] = 2; i[2][0] = 3; int total = 0;" + + " for (int[] j : i) total += j[0]; return total")); + } public void testDeclarationStatement() { assertEquals((byte)2, exec("byte a = 2; return a;")); From 2852e82ab9ff18aadda1cb22bf4380d6568719f0 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 6 Jun 2016 20:07:32 -0400 Subject: [PATCH 34/39] add def iteration over arrays --- .../java/org/elasticsearch/painless/Def.java | 99 +++++++++++++++++++ 1 file changed, 99 insertions(+) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java index 269386bf944..614490d0295 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java @@ -379,6 +379,103 @@ public final class Def { "[" + receiverClass.getCanonicalName() + "] as an array."); } + /** Helper class for isolating MethodHandles and methods to get iterators over arrays + * (to emulate "enhanced for loop" using MethodHandles). These cause boxing, and are not as efficient + * as they could be, but works. 
+ */ + @SuppressWarnings("unused") // iterator() methods are are actually used, javac just does not know :) + private static final class ArrayIteratorHelper { + private static final Lookup PRIV_LOOKUP = MethodHandles.lookup(); + + private static final Map,MethodHandle> ARRAY_TYPE_MH_MAPPING = Collections.unmodifiableMap( + Stream.of(boolean[].class, byte[].class, short[].class, int[].class, long[].class, + char[].class, float[].class, double[].class, Object[].class) + .collect(Collectors.toMap(Function.identity(), type -> { + try { + return PRIV_LOOKUP.findStatic(PRIV_LOOKUP.lookupClass(), "iterator", MethodType.methodType(Iterator.class, type)); + } catch (ReflectiveOperationException e) { + throw new AssertionError(e); + } + })) + ); + + private static final MethodHandle OBJECT_ARRAY_MH = ARRAY_TYPE_MH_MAPPING.get(Object[].class); + + static Iterator iterator(final boolean[] array) { + return new Iterator() { + int index = 0; + @Override public boolean hasNext() { return index < array.length; } + @Override public Boolean next() { return array[index++]; } + }; + } + static Iterator iterator(final byte[] array) { + return new Iterator() { + int index = 0; + @Override public boolean hasNext() { return index < array.length; } + @Override public Byte next() { return array[index++]; } + }; + } + static Iterator iterator(final short[] array) { + return new Iterator() { + int index = 0; + @Override public boolean hasNext() { return index < array.length; } + @Override public Short next() { return array[index++]; } + }; + } + static Iterator iterator(final int[] array) { + return new Iterator() { + int index = 0; + @Override public boolean hasNext() { return index < array.length; } + @Override public Integer next() { return array[index++]; } + }; + } + static Iterator iterator(final long[] array) { + return new Iterator() { + int index = 0; + @Override public boolean hasNext() { return index < array.length; } + @Override public Long next() { return array[index++]; } + }; + 
} + static Iterator iterator(final char[] array) { + return new Iterator() { + int index = 0; + @Override public boolean hasNext() { return index < array.length; } + @Override public Character next() { return array[index++]; } + }; + } + static Iterator iterator(final float[] array) { + return new Iterator() { + int index = 0; + @Override public boolean hasNext() { return index < array.length; } + @Override public Float next() { return array[index++]; } + }; + } + static Iterator iterator(final double[] array) { + return new Iterator() { + int index = 0; + @Override public boolean hasNext() { return index < array.length; } + @Override public Double next() { return array[index++]; } + }; + } + static Iterator iterator(final Object[] array) { + return new Iterator() { + int index = 0; + @Override public boolean hasNext() { return index < array.length; } + @Override public Object next() { return array[index++]; } + }; + } + + static MethodHandle newIterator(Class arrayType) { + if (!arrayType.isArray()) { + throw new IllegalArgumentException("type must be an array"); + } + return (ARRAY_TYPE_MH_MAPPING.containsKey(arrayType)) ? 
+ ARRAY_TYPE_MH_MAPPING.get(arrayType) : + OBJECT_ARRAY_MH.asType(OBJECT_ARRAY_MH.type().changeParameterType(0, arrayType)); + } + + private ArrayIteratorHelper() {} + } /** * Returns a method handle to do iteration (for enhanced for loop) * @param receiverClass Class of the array to load the value from @@ -387,6 +484,8 @@ public final class Def { static MethodHandle lookupIterator(Class receiverClass) { if (Iterable.class.isAssignableFrom(receiverClass)) { return ITERATOR; + } else if (receiverClass.isArray()) { + return ArrayIteratorHelper.newIterator(receiverClass); } else { // TODO: arrays throw new IllegalArgumentException("Cannot iterate over [" + receiverClass.getCanonicalName() + "]"); From 8db9a971e510169dd08ddd87d9a74c3460033cac Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 6 Jun 2016 20:30:42 -0400 Subject: [PATCH 35/39] enable test --- .../java/org/elasticsearch/painless/BasicStatementTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java index 64137bfbcb3..c392e86806e 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java @@ -154,7 +154,6 @@ public class BasicStatementTests extends ScriptTestCase { " for (int[] j : i) total += j[0]; return total")); } - @AwaitsFix(bugUrl = "working on it") public void testArrayForEachStatementDef() { assertEquals(6, exec("def a = new int[3]; a[0] = 1; a[1] = 2; a[2] = 3; int total = 0;" + " for (int x : a) total += x; return total")); From 6dace47c1f493aea0b5e29b5bfc7d6e5358c7939 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 6 Jun 2016 17:59:39 -0700 Subject: [PATCH 36/39] Reverted S-node design change. 
--- .../src/main/antlr/PainlessParser.g4 | 2 +- .../painless/node/AStatement.java | 24 +- .../painless/node/SArrayEach.java | 135 ---------- .../elasticsearch/painless/node/SBlock.java | 21 +- .../elasticsearch/painless/node/SBreak.java | 4 +- .../elasticsearch/painless/node/SCatch.java | 8 +- .../painless/node/SContinue.java | 4 +- .../painless/node/SDeclBlock.java | 13 +- .../painless/node/SDeclaration.java | 4 +- .../org/elasticsearch/painless/node/SDo.java | 8 +- .../elasticsearch/painless/node/SEach.java | 242 ++++++++++++------ .../painless/node/SExpression.java | 4 +- .../org/elasticsearch/painless/node/SFor.java | 10 +- .../org/elasticsearch/painless/node/SIf.java | 8 +- .../elasticsearch/painless/node/SIfElse.java | 12 +- .../elasticsearch/painless/node/SReturn.java | 4 +- .../elasticsearch/painless/node/SSource.java | 16 +- .../elasticsearch/painless/node/SThrow.java | 4 +- .../org/elasticsearch/painless/node/STry.java | 17 +- .../elasticsearch/painless/node/SWhile.java | 8 +- .../painless/node/package-info.java | 4 +- 21 files changed, 226 insertions(+), 326 deletions(-) delete mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java diff --git a/modules/lang-painless/src/main/antlr/PainlessParser.g4 b/modules/lang-painless/src/main/antlr/PainlessParser.g4 index 6a5ea6bf7fe..dad8db62117 100644 --- a/modules/lang-painless/src/main/antlr/PainlessParser.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessParser.g4 @@ -74,7 +74,7 @@ decltype ; funcref - : ( TYPE | ID ) REF ID + : TYPE REF ID ; declvar diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java index f6b1048028c..b9e6679f630 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStatement.java @@ -114,33 +114,11 @@ 
public abstract class AStatement extends ANode { /** * Checks for errors and collects data for the writing phase. - * @return The new child node for the parent node calling this method. - * Possibly returns a different {@link AStatement} node if a type is - * def or if a different specialization is used. Otherwise, returns itself. */ - abstract AStatement analyze(Variables variables); + abstract void analyze(Variables variables); /** * Writes ASM based on the data collected during the analysis phase. */ abstract void write(MethodWriter writer); - - /** - * Used to copy statement data from one to another during analysis in the case of replacement. - */ - final AStatement copy(AStatement statement) { - lastSource = statement.lastSource; - beginLoop = statement.beginLoop; - inLoop = statement.inLoop; - lastLoop = statement.lastLoop; - methodEscape = statement.methodEscape; - loopEscape = statement.loopEscape; - allEscape = statement.allEscape; - anyContinue = statement.anyContinue; - anyBreak = statement.anyBreak; - loopCounterSlot = statement.loopCounterSlot; - statementCount = statement.statementCount; - - return this; - } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java deleted file mode 100644 index 032483f59f2..00000000000 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SArrayEach.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.painless.node; - -import org.elasticsearch.painless.AnalyzerCaster; -import org.elasticsearch.painless.Definition; -import org.elasticsearch.painless.Definition.Cast; -import org.elasticsearch.painless.Definition.Type; -import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.Variables; -import org.elasticsearch.painless.Variables.Variable; -import org.objectweb.asm.Label; -import org.objectweb.asm.Opcodes; - -/** - * Represents a for-each loop shortcut for arrays. (Internal only.) - */ -class SArrayEach extends AStatement { - final int maxLoopCounter; - final String type; - final String name; - AExpression expression; - AStatement block; - - Variable variable = null; - Variable array = null; - Variable index = null; - Type indexed = null; - Cast cast = null; - - public SArrayEach(Location location, int maxLoopCounter, String type, String name, AExpression expression, SBlock block) { - super(location); - - this.maxLoopCounter = maxLoopCounter; - this.type = type; - this.name = name; - this.expression = expression; - this.block = block; - } - - @Override - AStatement analyze(Variables variables) { - // Note that we do not need to analyze the expression as this must already be done - // in the parent to determine that the for each target type is an array. 
- - final Type type; - - try { - type = Definition.getType(this.type); - } catch (IllegalArgumentException exception) { - throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); - } - - variables.incrementScope(); - - variable = variables.addVariable(location, type, name, true, false); - array = variables.addVariable(location, expression.actual, "#array" + location.getOffset(), true, false); - index = variables.addVariable(location, Definition.INT_TYPE, "#index" + location.getOffset(), true, false); - indexed = Definition.getType(expression.actual.struct, expression.actual.dimensions - 1); - cast = AnalyzerCaster.getLegalCast(location, indexed, type, true, true); - - if (block == null) { - throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); - } - - block.beginLoop = true; - block.inLoop = true; - block = block.analyze(variables); - block.statementCount = Math.max(1, block.statementCount); - - if (block.loopEscape && !block.anyContinue) { - throw createError(new IllegalArgumentException("Extraneous for loop.")); - } - - statementCount = 1; - - if (maxLoopCounter > 0) { - loopCounterSlot = variables.getVariable(location, "#loop").slot; - } - - variables.decrementScope(); - - return this; - } - - @Override - void write(MethodWriter writer) { - writer.writeStatementOffset(location); - - expression.write(writer); - writer.visitVarInsn(array.type.type.getOpcode(Opcodes.ISTORE), array.slot); - writer.push(-1); - writer.visitVarInsn(index.type.type.getOpcode(Opcodes.ISTORE), index.slot); - - Label begin = new Label(); - Label end = new Label(); - - writer.mark(begin); - - writer.visitIincInsn(index.slot, 1); - writer.visitVarInsn(index.type.type.getOpcode(Opcodes.ILOAD), index.slot); - writer.visitVarInsn(array.type.type.getOpcode(Opcodes.ILOAD), array.slot); - writer.arrayLength(); - writer.ifICmp(MethodWriter.GE, end); - - writer.visitVarInsn(array.type.type.getOpcode(Opcodes.ILOAD), array.slot); - 
writer.visitVarInsn(index.type.type.getOpcode(Opcodes.ILOAD), index.slot); - writer.arrayLoad(indexed.type); - writer.writeCast(cast); - writer.visitVarInsn(variable.type.type.getOpcode(Opcodes.ISTORE), variable.slot); - - block.write(writer); - - writer.goTo(begin); - writer.mark(end); - } -} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBlock.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBlock.java index 175eb26566c..4dbbd80de54 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBlock.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBlock.java @@ -23,6 +23,7 @@ import org.elasticsearch.painless.Variables; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import java.util.Collections; import java.util.List; /** @@ -35,27 +36,29 @@ public final class SBlock extends AStatement { public SBlock(Location location, List statements) { super(location); - this.statements = statements; + this.statements = Collections.unmodifiableList(statements); } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { if (statements == null || statements.isEmpty()) { throw createError(new IllegalArgumentException("A block must contain at least one statement.")); } - for (int index = 0; index < statements.size(); ++index) { + AStatement last = statements.get(statements.size() - 1); + + for (AStatement statement : statements) { + // Note that we do not need to check after the last statement because + // there is no statement that can be unreachable after the last. 
if (allEscape) { throw createError(new IllegalArgumentException("Unreachable statement.")); } - AStatement statement = statements.get(index); - statement.inLoop = inLoop; - statement.lastSource = lastSource && index == statements.size() - 1; - statement.lastLoop = (beginLoop || lastLoop) && index == statements.size() - 1; + statement.lastSource = lastSource && statement == last; + statement.lastLoop = (beginLoop || lastLoop) && statement == last; - statements.set(index, statement.analyze(variables)); + statement.analyze(variables); methodEscape = statement.methodEscape; loopEscape = statement.loopEscape; @@ -64,8 +67,6 @@ public final class SBlock extends AStatement { anyBreak |= statement.anyBreak; statementCount += statement.statementCount; } - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBreak.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBreak.java index 6e5c984814f..ca72dd0b55b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBreak.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SBreak.java @@ -33,7 +33,7 @@ public final class SBreak extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { if (!inLoop) { throw createError(new IllegalArgumentException("Break statement outside of a loop.")); } @@ -42,8 +42,6 @@ public final class SBreak extends AStatement { allEscape = true; anyBreak = true; statementCount = 1; - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java index 873192b1933..8bcaf9d22cf 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java @@ -35,7 +35,7 @@ public final 
class SCatch extends AStatement { final String type; final String name; - AStatement block; + final SBlock block; Variable variable; @@ -52,7 +52,7 @@ public final class SCatch extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { final Type type; try { @@ -72,7 +72,7 @@ public final class SCatch extends AStatement { block.inLoop = inLoop; block.lastLoop = lastLoop; - block = block.analyze(variables); + block.analyze(variables); methodEscape = block.methodEscape; loopEscape = block.loopEscape; @@ -81,8 +81,6 @@ public final class SCatch extends AStatement { anyBreak = block.anyBreak; statementCount = block.statementCount; } - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SContinue.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SContinue.java index 7c8eb6fefcc..ef80766bdd1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SContinue.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SContinue.java @@ -33,7 +33,7 @@ public final class SContinue extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { if (!inLoop) { throw createError(new IllegalArgumentException("Continue statement outside of a loop.")); } @@ -45,8 +45,6 @@ public final class SContinue extends AStatement { allEscape = true; anyContinue = true; statementCount = 1; - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclBlock.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclBlock.java index c7d1c93efb6..1ff187afe29 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclBlock.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclBlock.java @@ -23,6 +23,7 @@ import 
org.elasticsearch.painless.Variables; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import java.util.Collections; import java.util.List; /** @@ -35,20 +36,16 @@ public final class SDeclBlock extends AStatement { public SDeclBlock(Location location, List declarations) { super(location); - this.declarations = declarations; + this.declarations = Collections.unmodifiableList(declarations); } @Override - AStatement analyze(Variables variables) { - for (int index = 0; index < declarations.size(); ++index) { - AStatement declaration = declarations.get(index); - - declarations.set(index, (SDeclaration)declaration.analyze(variables)); + void analyze(Variables variables) { + for (SDeclaration declaration : declarations) { + declaration.analyze(variables); } statementCount = declarations.size(); - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java index 8430382d099..5f039184cb6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java @@ -47,7 +47,7 @@ public final class SDeclaration extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { final Type type; try { @@ -63,8 +63,6 @@ public final class SDeclaration extends AStatement { } variable = variables.addVariable(location, type, name, false, false); - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDo.java index 1a0aa132d5e..4989fb77a79 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDo.java +++ 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDo.java @@ -31,7 +31,7 @@ import org.elasticsearch.painless.MethodWriter; public final class SDo extends AStatement { final int maxLoopCounter; - AStatement block; + final SBlock block; AExpression condition; public SDo(Location location, int maxLoopCounter, SBlock block, AExpression condition) { @@ -43,7 +43,7 @@ public final class SDo extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { variables.incrementScope(); if (block == null) { @@ -53,7 +53,7 @@ public final class SDo extends AStatement { block.beginLoop = true; block.inLoop = true; - block = block.analyze(variables); + block.analyze(variables); if (block.loopEscape && !block.anyContinue) { throw createError(new IllegalArgumentException("Extraneous do while loop.")); @@ -83,8 +83,6 @@ public final class SDo extends AStatement { } variables.decrementScope(); - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java index 309c6d97967..3d6d5e07fdc 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java @@ -45,14 +45,22 @@ public class SEach extends AStatement { final String type; final String name; AExpression expression; - AStatement block; + final SBlock block; + // Members for all cases. Variable variable = null; + Cast cast = null; + + // Members for the array case. + Variable array = null; + Variable index = null; + Type indexed = null; + + // Members for the iterable case. 
Variable iterator = null; Method method = null; Method hasNext = null; Method next = null; - Cast cast = null; public SEach(Location location, int maxLoopCounter, String type, String name, AExpression expression, SBlock block) { super(location); @@ -65,92 +73,172 @@ public class SEach extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { expression.analyze(variables); expression.expected = expression.actual; expression = expression.cast(variables); - Sort sort = expression.actual.sort; - - if (sort == Sort.ARRAY) { - return new SArrayEach(location, maxLoopCounter, type, name, expression, (SBlock)block).copy(this).analyze(variables); - } else if (sort == Sort.DEF || Iterable.class.isAssignableFrom(expression.actual.clazz)) { - final Type type; - - try { - type = Definition.getType(this.type); - } catch (IllegalArgumentException exception) { - throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); - } - - variables.incrementScope(); - - Type itr = Definition.getType("Iterator"); - - variable = variables.addVariable(location, type, name, true, false); - - // We must store the iterator as a variable for securing a slot on the stack, and - // also add the location offset to make the name unique in case of nested for each loops. 
- iterator = variables.addVariable(location, itr, "#itr" + location.getOffset(), true, false); - - if (sort == Sort.DEF) { - method = null; - } else { - method = expression.actual.struct.methods.get(new MethodKey("iterator", 0)); - - if (method == null) { - throw location.createError(new IllegalArgumentException( - "Unable to create iterator for the type [" + expression.actual.name + "].")); - } - } - - hasNext = itr.struct.methods.get(new MethodKey("hasNext", 0)); - - if (hasNext == null) { - throw location.createError(new IllegalArgumentException("Method [hasNext] does not exist for type [Iterator].")); - } else if (hasNext.rtn.sort != Sort.BOOL) { - throw location.createError(new IllegalArgumentException("Method [hasNext] does not return type [boolean].")); - } - - next = itr.struct.methods.get(new MethodKey("next", 0)); - - if (next == null) { - throw location.createError(new IllegalArgumentException("Method [next] does not exist for type [Iterator].")); - } else if (next.rtn.sort != Sort.DEF) { - throw location.createError(new IllegalArgumentException("Method [next] does not return type [def].")); - } - - cast = AnalyzerCaster.getLegalCast(location, Definition.DEF_TYPE, type, true, true); - - if (block == null) { - throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); - } - - block.beginLoop = true; - block.inLoop = true; - block = block.analyze(variables); - block.statementCount = Math.max(1, block.statementCount); - - if (block.loopEscape && !block.anyContinue) { - throw createError(new IllegalArgumentException("Extraneous for loop.")); - } - - statementCount = 1; - - if (maxLoopCounter > 0) { - loopCounterSlot = variables.getVariable(location, "#loop").slot; - } - - variables.decrementScope(); - - return this; + if (expression.actual.sort == Sort.ARRAY) { + analyzeArray(variables); + } else if (expression.actual.sort == Sort.DEF || Iterable.class.isAssignableFrom(expression.actual.clazz)) { + analyzeIterable(variables); } 
else { throw location.createError(new IllegalArgumentException("Illegal for each type [" + expression.actual.name + "].")); } } + void analyzeArray(Variables variables) { + final Type type; + + try { + type = Definition.getType(this.type); + } catch (IllegalArgumentException exception) { + throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); + } + + variables.incrementScope(); + + variable = variables.addVariable(location, type, name, true, false); + array = variables.addVariable(location, expression.actual, "#array" + location.getOffset(), true, false); + index = variables.addVariable(location, Definition.INT_TYPE, "#index" + location.getOffset(), true, false); + indexed = Definition.getType(expression.actual.struct, expression.actual.dimensions - 1); + cast = AnalyzerCaster.getLegalCast(location, indexed, type, true, true); + + if (block == null) { + throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); + } + + block.beginLoop = true; + block.inLoop = true; + block.analyze(variables); + block.statementCount = Math.max(1, block.statementCount); + + if (block.loopEscape && !block.anyContinue) { + throw createError(new IllegalArgumentException("Extraneous for loop.")); + } + + statementCount = 1; + + if (maxLoopCounter > 0) { + loopCounterSlot = variables.getVariable(location, "#loop").slot; + } + + variables.decrementScope(); + } + + void analyzeIterable(Variables variables) { + final Type type; + + try { + type = Definition.getType(this.type); + } catch (IllegalArgumentException exception) { + throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); + } + + variables.incrementScope(); + + Type itr = Definition.getType("Iterator"); + + variable = variables.addVariable(location, type, name, true, false); + + // We must store the iterator as a variable for securing a slot on the stack, and + // also add the location offset to make the name unique in case of nested for each 
loops. + iterator = variables.addVariable(location, itr, "#itr" + location.getOffset(), true, false); + + if (expression.actual.sort == Sort.DEF) { + method = null; + } else { + method = expression.actual.struct.methods.get(new MethodKey("iterator", 0)); + + if (method == null) { + throw location.createError(new IllegalArgumentException( + "Unable to create iterator for the type [" + expression.actual.name + "].")); + } + } + + hasNext = itr.struct.methods.get(new MethodKey("hasNext", 0)); + + if (hasNext == null) { + throw location.createError(new IllegalArgumentException("Method [hasNext] does not exist for type [Iterator].")); + } else if (hasNext.rtn.sort != Sort.BOOL) { + throw location.createError(new IllegalArgumentException("Method [hasNext] does not return type [boolean].")); + } + + next = itr.struct.methods.get(new MethodKey("next", 0)); + + if (next == null) { + throw location.createError(new IllegalArgumentException("Method [next] does not exist for type [Iterator].")); + } else if (next.rtn.sort != Sort.DEF) { + throw location.createError(new IllegalArgumentException("Method [next] does not return type [def].")); + } + + cast = AnalyzerCaster.getLegalCast(location, Definition.DEF_TYPE, type, true, true); + + if (block == null) { + throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); + } + + block.beginLoop = true; + block.inLoop = true; + block.analyze(variables); + block.statementCount = Math.max(1, block.statementCount); + + if (block.loopEscape && !block.anyContinue) { + throw createError(new IllegalArgumentException("Extraneous for loop.")); + } + + statementCount = 1; + + if (maxLoopCounter > 0) { + loopCounterSlot = variables.getVariable(location, "#loop").slot; + } + + variables.decrementScope(); + } + @Override void write(MethodWriter writer) { + if (array != null) { + writeArray(writer); + } else if (iterator != null) { + writeIterable(writer); + } else { + throw location.createError(new 
IllegalStateException("Illegal tree structure.")); + } + } + + void writeArray(MethodWriter writer) { + writer.writeStatementOffset(location); + + expression.write(writer); + writer.visitVarInsn(array.type.type.getOpcode(Opcodes.ISTORE), array.slot); + writer.push(-1); + writer.visitVarInsn(index.type.type.getOpcode(Opcodes.ISTORE), index.slot); + + Label begin = new Label(); + Label end = new Label(); + + writer.mark(begin); + + writer.visitIincInsn(index.slot, 1); + writer.visitVarInsn(index.type.type.getOpcode(Opcodes.ILOAD), index.slot); + writer.visitVarInsn(array.type.type.getOpcode(Opcodes.ILOAD), array.slot); + writer.arrayLength(); + writer.ifICmp(MethodWriter.GE, end); + + writer.visitVarInsn(array.type.type.getOpcode(Opcodes.ILOAD), array.slot); + writer.visitVarInsn(index.type.type.getOpcode(Opcodes.ILOAD), index.slot); + writer.arrayLoad(indexed.type); + writer.writeCast(cast); + writer.visitVarInsn(variable.type.type.getOpcode(Opcodes.ISTORE), variable.slot); + + block.write(writer); + + writer.goTo(begin); + writer.mark(end); + } + + void writeIterable(MethodWriter writer) { writer.writeStatementOffset(location); expression.write(writer); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SExpression.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SExpression.java index 87b412c5d52..1bea07d5599 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SExpression.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SExpression.java @@ -39,7 +39,7 @@ public final class SExpression extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { expression.read = lastSource; expression.analyze(variables); @@ -57,8 +57,6 @@ public final class SExpression extends AStatement { loopEscape = rtn; allEscape = rtn; statementCount = 1; - - return this; } @Override diff --git 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java index 6085c1ce8d9..04475a91a1a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFor.java @@ -34,7 +34,7 @@ public final class SFor extends AStatement { ANode initializer; AExpression condition; AExpression afterthought; - AStatement block; + final SBlock block; public SFor(Location location, int maxLoopCounter, ANode initializer, AExpression condition, AExpression afterthought, SBlock block) { @@ -48,14 +48,14 @@ public final class SFor extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { variables.incrementScope(); boolean continuous = false; if (initializer != null) { if (initializer instanceof AStatement) { - initializer = ((AStatement)initializer).analyze(variables); + ((AStatement)initializer).analyze(variables); } else if (initializer instanceof AExpression) { AExpression initializer = (AExpression)this.initializer; @@ -103,7 +103,7 @@ public final class SFor extends AStatement { block.beginLoop = true; block.inLoop = true; - block = block.analyze(variables); + block.analyze(variables); if (block.loopEscape && !block.anyContinue) { throw createError(new IllegalArgumentException("Extraneous for loop.")); @@ -124,8 +124,6 @@ public final class SFor extends AStatement { } variables.decrementScope(); - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java index aa24c8acc39..954ddac9c6a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIf.java @@ -31,7 +31,7 @@ import 
org.elasticsearch.painless.MethodWriter; public final class SIf extends AStatement { AExpression condition; - AStatement ifblock; + final SBlock ifblock; public SIf(Location location, AExpression condition, SBlock ifblock) { super(location); @@ -41,7 +41,7 @@ public final class SIf extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { condition.expected = Definition.BOOLEAN_TYPE; condition.analyze(variables); condition = condition.cast(variables); @@ -59,14 +59,12 @@ public final class SIf extends AStatement { ifblock.lastLoop = lastLoop; variables.incrementScope(); - ifblock = ifblock.analyze(variables); + ifblock.analyze(variables); variables.decrementScope(); anyContinue = ifblock.anyContinue; anyBreak = ifblock.anyBreak; statementCount = ifblock.statementCount; - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java index 01e1ceb5750..1d801267054 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SIfElse.java @@ -31,8 +31,8 @@ import org.elasticsearch.painless.MethodWriter; public final class SIfElse extends AStatement { AExpression condition; - AStatement ifblock; - AStatement elseblock; + final SBlock ifblock; + final SBlock elseblock; public SIfElse(Location location, AExpression condition, SBlock ifblock, SBlock elseblock) { super(location); @@ -43,7 +43,7 @@ public final class SIfElse extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { condition.expected = Definition.BOOLEAN_TYPE; condition.analyze(variables); condition = condition.cast(variables); @@ -61,7 +61,7 @@ public final class SIfElse extends AStatement { ifblock.lastLoop = lastLoop; variables.incrementScope(); - 
ifblock = ifblock.analyze(variables); + ifblock.analyze(variables); variables.decrementScope(); anyContinue = ifblock.anyContinue; @@ -77,7 +77,7 @@ public final class SIfElse extends AStatement { elseblock.lastLoop = lastLoop; variables.incrementScope(); - elseblock = elseblock.analyze(variables); + elseblock.analyze(variables); variables.decrementScope(); methodEscape = ifblock.methodEscape && elseblock.methodEscape; @@ -86,8 +86,6 @@ public final class SIfElse extends AStatement { anyContinue |= elseblock.anyContinue; anyBreak |= elseblock.anyBreak; statementCount = Math.max(ifblock.statementCount, elseblock.statementCount); - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java index bab12fb52f5..bd9707eb6a4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java @@ -38,7 +38,7 @@ public final class SReturn extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { expression.expected = Definition.OBJECT_TYPE; expression.internal = true; expression.analyze(variables); @@ -49,8 +49,6 @@ public final class SReturn extends AStatement { allEscape = true; statementCount = 1; - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java index 57acc425c46..22fd2e3e28c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java @@ -24,6 +24,7 @@ import org.objectweb.asm.Opcodes; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import 
java.util.Collections; import java.util.List; /** @@ -36,36 +37,35 @@ public final class SSource extends AStatement { public SSource(Location location, List statements) { super(location); - this.statements = statements; + this.statements = Collections.unmodifiableList(statements); } @Override - public AStatement analyze(Variables variables) { + public void analyze(Variables variables) { if (statements == null || statements.isEmpty()) { throw createError(new IllegalArgumentException("Cannot generate an empty script.")); } variables.incrementScope(); - for (int index = 0; index < statements.size(); ++index) { - AStatement statement = statements.get(index); + AStatement last = statements.get(statements.size() - 1); + for (AStatement statement : statements) { // Note that we do not need to check after the last statement because // there is no statement that can be unreachable after the last. if (allEscape) { throw createError(new IllegalArgumentException("Unreachable statement.")); } - statement.lastSource = index == statements.size() - 1; - statements.set(index, statement.analyze(variables)); + statement.lastSource = statement == last; + + statement.analyze(variables); methodEscape = statement.methodEscape; allEscape = statement.allEscape; } variables.decrementScope(); - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SThrow.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SThrow.java index c8c02f9daa1..af9d7a65990 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SThrow.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SThrow.java @@ -38,7 +38,7 @@ public final class SThrow extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { expression.expected = Definition.EXCEPTION_TYPE; expression.analyze(variables); expression = expression.cast(variables); @@ -47,8 +47,6 @@ 
public final class SThrow extends AStatement { loopEscape = true; allEscape = true; statementCount = 1; - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/STry.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/STry.java index 65e66a9d9a5..c24c8273dba 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/STry.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/STry.java @@ -24,6 +24,7 @@ import org.objectweb.asm.Label; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import java.util.Collections; import java.util.List; /** @@ -31,18 +32,18 @@ import java.util.List; */ public final class STry extends AStatement { - AStatement block; + final SBlock block; final List catches; public STry(Location location, SBlock block, List catches) { super(location); this.block = block; - this.catches = catches; + this.catches = Collections.unmodifiableList(catches); } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { if (block == null) { throw createError(new IllegalArgumentException("Extraneous try statement.")); } @@ -52,7 +53,7 @@ public final class STry extends AStatement { block.lastLoop = lastLoop; variables.incrementScope(); - block = block.analyze(variables); + block.analyze(variables); variables.decrementScope(); methodEscape = block.methodEscape; @@ -63,15 +64,13 @@ public final class STry extends AStatement { int statementCount = 0; - for (int index = 0; index < catches.size(); ++index) { - SCatch catc = catches.get(index); - + for (SCatch catc : catches) { catc.lastSource = lastSource; catc.inLoop = inLoop; catc.lastLoop = lastLoop; variables.incrementScope(); - catches.set(index, (SCatch)catc.analyze(variables)); + catc.analyze(variables); variables.decrementScope(); methodEscape &= catc.methodEscape; @@ -84,8 +83,6 @@ public final class STry extends 
AStatement { } this.statementCount = block.statementCount + statementCount; - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SWhile.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SWhile.java index a886c3632c3..59c1bb75ee8 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SWhile.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SWhile.java @@ -32,7 +32,7 @@ public final class SWhile extends AStatement { final int maxLoopCounter; AExpression condition; - AStatement block; + final SBlock block; public SWhile(Location location, int maxLoopCounter, AExpression condition, SBlock block) { super(location); @@ -43,7 +43,7 @@ public final class SWhile extends AStatement { } @Override - AStatement analyze(Variables variables) { + void analyze(Variables variables) { variables.incrementScope(); condition.expected = Definition.BOOLEAN_TYPE; @@ -68,7 +68,7 @@ public final class SWhile extends AStatement { block.beginLoop = true; block.inLoop = true; - block = block.analyze(variables); + block.analyze(variables); if (block.loopEscape && !block.anyContinue) { throw createError(new IllegalArgumentException("Extraneous while loop.")); @@ -89,8 +89,6 @@ public final class SWhile extends AStatement { } variables.decrementScope(); - - return this; } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java index 04727507edc..71cde33e979 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/package-info.java @@ -63,7 +63,6 @@ * {@link org.elasticsearch.painless.node.LStatic} - Represents a static type target. 
* {@link org.elasticsearch.painless.node.LString} - Represents a string constant. * {@link org.elasticsearch.painless.node.LVariable} - Represents a variable load/store. - * {@link org.elasticsearch.painless.node.SArrayEach} - Represents a for each loop shortcut for arrays. (Internal only.) * {@link org.elasticsearch.painless.node.SBlock} - Represents a set of statements as a branch of control-flow. * {@link org.elasticsearch.painless.node.SBreak} - Represents a break statement. * {@link org.elasticsearch.painless.node.SCatch} - Represents a catch block as part of a try-catch block. @@ -92,8 +91,7 @@ *

    * Generally, statement nodes have member data that evaluate legal control-flow during the analysis phase. * The typical order for statement nodes is for each node to call analyze on it's children during the analysis phase - * and write on it's children during the writing phase. Upon analysis completion, a statement will return either - * itself or another statement node depending on if a shortcut or def type was found. + * and write on it's children during the writing phase. *

    * Generally, expression nodes have member data that evaluate static types. The typical order for an expression node * during the analysis phase looks like the following: From 231268c89d1ddd29ce2c603d542c5523dcd86830 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 6 Jun 2016 21:04:59 -0400 Subject: [PATCH 37/39] remove outdated TODO --- .../src/main/java/org/elasticsearch/painless/Def.java | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java index 614490d0295..f85dff2ed21 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java @@ -487,7 +487,6 @@ public final class Def { } else if (receiverClass.isArray()) { return ArrayIteratorHelper.newIterator(receiverClass); } else { - // TODO: arrays throw new IllegalArgumentException("Cannot iterate over [" + receiverClass.getCanonicalName() + "]"); } } From b3804c47f7e8ffdf2a98a34ac34dd48b8e97e39a Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 6 Jun 2016 18:21:08 -0700 Subject: [PATCH 38/39] Cleaned up SEach node. 
--- .../painless/WriterConstants.java | 5 + .../elasticsearch/painless/node/SEach.java | 99 +++++-------------- 2 files changed, 30 insertions(+), 74 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java index a4804cde434..fce40d5a6e0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java @@ -30,6 +30,7 @@ import java.lang.invoke.CallSite; import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; import java.util.BitSet; +import java.util.Iterator; import java.util.Map; /** @@ -56,6 +57,10 @@ public final class WriterConstants { public final static Type MAP_TYPE = Type.getType(Map.class); public final static Method MAP_GET = getAsmMethod(Object.class, "get", Object.class); + public final static Type ITERATOR_TYPE = Type.getType(Iterator.class); + public final static Method ITERATOR_HASNEXT = getAsmMethod(boolean.class, "hasNext"); + public final static Method ITERATOR_NEXT = getAsmMethod(Object.class, "next"); + public final static Type UTILITY_TYPE = Type.getType(Utility.class); public final static Method STRING_TO_CHAR = getAsmMethod(char.class, "StringTochar", String.class); public final static Method CHAR_TO_STRING = getAsmMethod(String.class, "charToString", char.class); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java index 3d6d5e07fdc..2a8fb0e4063 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java @@ -35,6 +35,9 @@ import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; import static 
org.elasticsearch.painless.WriterConstants.DEF_BOOTSTRAP_HANDLE; +import static org.elasticsearch.painless.WriterConstants.ITERATOR_HASNEXT; +import static org.elasticsearch.painless.WriterConstants.ITERATOR_NEXT; +import static org.elasticsearch.painless.WriterConstants.ITERATOR_TYPE; /** * Represents a for-each loop shortcut for iterables. Defers to other S-nodes for non-iterable types. @@ -59,8 +62,6 @@ public class SEach extends AStatement { // Members for the iterable case. Variable iterator = null; Method method = null; - Method hasNext = null; - Method next = null; public SEach(Location location, int maxLoopCounter, String type, String name, AExpression expression, SBlock block) { super(location); @@ -78,16 +79,6 @@ public class SEach extends AStatement { expression.expected = expression.actual; expression = expression.cast(variables); - if (expression.actual.sort == Sort.ARRAY) { - analyzeArray(variables); - } else if (expression.actual.sort == Sort.DEF || Iterable.class.isAssignableFrom(expression.actual.clazz)) { - analyzeIterable(variables); - } else { - throw location.createError(new IllegalArgumentException("Illegal for each type [" + expression.actual.name + "].")); - } - } - - void analyzeArray(Variables variables) { final Type type; try { @@ -99,10 +90,14 @@ public class SEach extends AStatement { variables.incrementScope(); variable = variables.addVariable(location, type, name, true, false); - array = variables.addVariable(location, expression.actual, "#array" + location.getOffset(), true, false); - index = variables.addVariable(location, Definition.INT_TYPE, "#index" + location.getOffset(), true, false); - indexed = Definition.getType(expression.actual.struct, expression.actual.dimensions - 1); - cast = AnalyzerCaster.getLegalCast(location, indexed, type, true, true); + + if (expression.actual.sort == Sort.ARRAY) { + analyzeArray(variables, type); + } else if (expression.actual.sort == Sort.DEF || 
Iterable.class.isAssignableFrom(expression.actual.clazz)) { + analyzeIterable(variables, type); + } else { + throw location.createError(new IllegalArgumentException("Illegal for each type [" + expression.actual.name + "].")); + } if (block == null) { throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); @@ -126,24 +121,19 @@ public class SEach extends AStatement { variables.decrementScope(); } - void analyzeIterable(Variables variables) { - final Type type; - - try { - type = Definition.getType(this.type); - } catch (IllegalArgumentException exception) { - throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); - } - - variables.incrementScope(); - - Type itr = Definition.getType("Iterator"); - - variable = variables.addVariable(location, type, name, true, false); + void analyzeArray(Variables variables, Type type) { + // We must store the array and index as variables for securing slots on the stack, and + // also add the location offset to make the names unique in case of nested for each loops. + array = variables.addVariable(location, expression.actual, "#array" + location.getOffset(), true, false); + index = variables.addVariable(location, Definition.INT_TYPE, "#index" + location.getOffset(), true, false); + indexed = Definition.getType(expression.actual.struct, expression.actual.dimensions - 1); + cast = AnalyzerCaster.getLegalCast(location, indexed, type, true, true); + } + void analyzeIterable(Variables variables, Type type) { // We must store the iterator as a variable for securing a slot on the stack, and // also add the location offset to make the name unique in case of nested for each loops. 
- iterator = variables.addVariable(location, itr, "#itr" + location.getOffset(), true, false); + iterator = variables.addVariable(location, Definition.getType("Iterator"), "#itr" + location.getOffset(), true, false); if (expression.actual.sort == Sort.DEF) { method = null; @@ -156,48 +146,13 @@ public class SEach extends AStatement { } } - hasNext = itr.struct.methods.get(new MethodKey("hasNext", 0)); - - if (hasNext == null) { - throw location.createError(new IllegalArgumentException("Method [hasNext] does not exist for type [Iterator].")); - } else if (hasNext.rtn.sort != Sort.BOOL) { - throw location.createError(new IllegalArgumentException("Method [hasNext] does not return type [boolean].")); - } - - next = itr.struct.methods.get(new MethodKey("next", 0)); - - if (next == null) { - throw location.createError(new IllegalArgumentException("Method [next] does not exist for type [Iterator].")); - } else if (next.rtn.sort != Sort.DEF) { - throw location.createError(new IllegalArgumentException("Method [next] does not return type [def].")); - } - cast = AnalyzerCaster.getLegalCast(location, Definition.DEF_TYPE, type, true, true); - - if (block == null) { - throw location.createError(new IllegalArgumentException("Extraneous for each loop.")); - } - - block.beginLoop = true; - block.inLoop = true; - block.analyze(variables); - block.statementCount = Math.max(1, block.statementCount); - - if (block.loopEscape && !block.anyContinue) { - throw createError(new IllegalArgumentException("Extraneous for loop.")); - } - - statementCount = 1; - - if (maxLoopCounter > 0) { - loopCounterSlot = variables.getVariable(location, "#loop").slot; - } - - variables.decrementScope(); } @Override void write(MethodWriter writer) { + writer.writeStatementOffset(location); + if (array != null) { writeArray(writer); } else if (iterator != null) { @@ -208,8 +163,6 @@ public class SEach extends AStatement { } void writeArray(MethodWriter writer) { - writer.writeStatementOffset(location); - 
expression.write(writer); writer.visitVarInsn(array.type.type.getOpcode(Opcodes.ISTORE), array.slot); writer.push(-1); @@ -239,8 +192,6 @@ public class SEach extends AStatement { } void writeIterable(MethodWriter writer) { - writer.writeStatementOffset(location); - expression.write(writer); if (method == null) { @@ -261,11 +212,11 @@ public class SEach extends AStatement { writer.mark(begin); writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ILOAD), iterator.slot); - writer.invokeInterface(hasNext.owner.type, hasNext.method); + writer.invokeInterface(ITERATOR_TYPE, ITERATOR_HASNEXT); writer.ifZCmp(MethodWriter.EQ, end); writer.visitVarInsn(iterator.type.type.getOpcode(Opcodes.ILOAD), iterator.slot); - writer.invokeInterface(next.owner.type, next.method); + writer.invokeInterface(ITERATOR_TYPE, ITERATOR_NEXT); writer.writeCast(cast); writer.visitVarInsn(variable.type.type.getOpcode(Opcodes.ISTORE), variable.slot); From da74323141a69bd06fc19a70bc072143e9f0e986 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 6 Jun 2016 22:09:12 -0400 Subject: [PATCH 39/39] Register thread pool settings This commit refactors the handling of thread pool settings so that the individual settings can be registered rather than registering the top level group. With this refactoring, individual plugins must now register their own settings for custom thread pools that they need, but a dedicated API is provided for this in the thread pool module. This commit also renames the prefix on the thread pool settings from "threadpool" to "thread_pool". 
This enables a hard break on the settings so that: - some of the settings can be given more sensible names (e.g., the max number of threads in a scaling thread pool is now named "max" instead of "size") - change the soft limit on the number of threads in the bulk and indexing thread pools to a hard limit - the settings names for custom plugins for thread pools can be prefixed (e.g., "xpack.watcher.thread_pool.size") - remove dynamic thread pool settings Relates #18674 --- .../common/settings/ClusterSettings.java | 4 +- .../common/util/concurrent/EsExecutors.java | 13 +- .../java/org/elasticsearch/node/Node.java | 15 +- .../org/elasticsearch/plugins/Plugin.java | 14 + .../elasticsearch/plugins/PluginsService.java | 9 + .../threadpool/ExecutorBuilder.java | 91 ++++ .../threadpool/FixedExecutorBuilder.java | 135 ++++++ .../threadpool/ScalingExecutorBuilder.java | 129 ++++++ .../elasticsearch/threadpool/ThreadPool.java | 415 +++--------------- .../threadpool/ThreadPoolModule.java | 13 +- .../action/RejectionActionIT.java | 12 +- .../node/tasks/TaskManagerTestCase.java | 3 +- .../action/bulk/BulkProcessorRetryIT.java | 6 +- .../bulk/TransportBulkActionTookTests.java | 3 +- .../support/ListenableActionFutureTests.java | 3 +- .../TransportBroadcastByNodeActionTests.java | 3 +- .../TransportMasterNodeActionTests.java | 3 +- .../nodes/TransportNodesActionTests.java | 3 +- .../BroadcastReplicationTests.java | 3 +- .../TransportReplicationActionTests.java | 3 +- ...ortInstanceSingleOperationActionTests.java | 3 +- .../TransportClientNodesServiceTests.java | 3 +- .../action/shard/ShardStateActionTests.java | 3 +- .../health/ClusterStateHealthTests.java | 3 +- .../DelayedAllocationServiceTests.java | 3 +- .../cluster/service/ClusterServiceTests.java | 3 +- .../concurrent/PrioritizedExecutorsTests.java | 3 +- .../discovery/ZenFaultDetectionTests.java | 3 +- .../zen/NodeJoinControllerTests.java | 3 +- .../zen/ping/unicast/UnicastZenPingIT.java | 3 +- 
.../PublishClusterStateActionTests.java | 3 +- .../gateway/AsyncShardFetchTests.java | 3 +- .../http/netty/NettyHttpChannelTests.java | 3 +- .../netty/NettyHttpServerPipeliningTests.java | 3 +- .../netty/NettyHttpServerTransportTests.java | 3 +- .../elasticsearch/index/IndexModuleTests.java | 3 +- .../index/engine/InternalEngineTests.java | 3 +- .../index/engine/ShadowEngineTests.java | 3 +- .../fielddata/IndexFieldDataServiceTests.java | 3 +- .../mapper/DynamicMappingDisabledTests.java | 3 +- .../indices/store/IndicesStoreTests.java | 3 +- .../jvm/JvmGcMonitorServiceSettingsTests.java | 3 +- .../script/NativeScriptTests.java | 3 +- .../search/SearchWithRejectionsIT.java | 4 +- .../threadpool/FixedThreadPoolTests.java | 17 +- .../threadpool/ScalingThreadPoolTests.java | 88 +--- .../threadpool/SimpleThreadPoolIT.java | 57 --- .../ThreadPoolSerializationTests.java | 2 +- .../UpdateThreadPoolSettingsTests.java | 283 +++--------- .../AbstractSimpleTransportTestCase.java | 3 +- .../NettySizeHeaderFrameDecoderTests.java | 1 - .../NettyTransportServiceHandshakeTests.java | 3 +- .../netty/NettyScheduledPingTests.java | 3 +- .../netty/NettyTransportMultiPortTests.java | 11 +- .../watcher/ResourceWatcherServiceTests.java | 5 +- .../migration/migrate_5_0/settings.asciidoc | 13 + docs/reference/modules/threadpool.asciidoc | 24 +- .../messy/tests/TemplateQueryParserTests.java | 3 +- ...tAsyncBulkIndexByScrollActionTestCase.java | 3 +- .../reindex/AsyncBulkByScrollActionTests.java | 9 +- .../index/reindex/BulkByScrollTaskTests.java | 5 +- .../index/reindex/RetryTests.java | 8 +- .../discovery/ec2/Ec2DiscoveryTests.java | 3 +- .../discovery/gce/GceDiscoveryTests.java | 3 +- .../org/elasticsearch/test/ESTestCase.java | 1 - .../elasticsearch/test/client/NoOpClient.java | 3 +- .../threadpool/TestThreadPool.java | 31 ++ 67 files changed, 705 insertions(+), 827 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java create mode 100644 
core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java create mode 100644 core/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java create mode 100644 test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index c9abafbbe54..6eb8df68242 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -190,7 +190,6 @@ public final class ClusterSettings extends AbstractScopedSettings { RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, - ThreadPool.THREADPOOL_GROUP_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING, @@ -419,6 +418,7 @@ public final class ClusterSettings extends AbstractScopedSettings { ResourceWatcherService.RELOAD_INTERVAL_HIGH, ResourceWatcherService.RELOAD_INTERVAL_MEDIUM, ResourceWatcherService.RELOAD_INTERVAL_LOW, - SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING + SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING, + ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING ))); } diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 2d45a6fecff..5ac94f8b386 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ 
-83,13 +83,16 @@ public class EsExecutors { } public static String threadName(Settings settings, String namePrefix) { - String name = settings.get("node.name"); - if (name == null) { - name = "elasticsearch"; + String nodeName = settings.get("node.name"); + if (nodeName == null) { + return threadName("", namePrefix); } else { - name = "elasticsearch[" + name + "]"; + return threadName(nodeName, namePrefix); } - return name + "[" + namePrefix + "]"; + } + + public static String threadName(final String nodeName, final String namePrefix) { + return "elasticsearch" + (nodeName.isEmpty() ? "" : "[") + nodeName + (nodeName.isEmpty() ? "" : "]") + "[" + namePrefix + "]"; } public static ThreadFactory daemonThreadFactory(Settings settings, String namePrefix) { diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index cf33770fd16..04063ce5864 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -98,6 +98,7 @@ import org.elasticsearch.search.SearchService; import org.elasticsearch.snapshots.SnapshotShardsService; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.TaskResultsService; +import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolModule; import org.elasticsearch.transport.TransportService; @@ -210,11 +211,12 @@ public class Node implements Closeable { throw new IllegalStateException("Failed to created node environment", ex); } final NetworkService networkService = new NetworkService(settings); - final ThreadPool threadPool = new ThreadPool(settings); + final List> executorBuilders = pluginsService.getExecutorBuilders(settings); + final ThreadPool threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); + NamedWriteableRegistry namedWriteableRegistry = new 
NamedWriteableRegistry(); boolean success = false; try { - final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool); ModulesBuilder modules = new ModulesBuilder(); modules.add(new Version.Module(version)); modules.add(new CircuitBreakerModule(settings)); @@ -222,6 +224,7 @@ public class Node implements Closeable { for (Module pluginModule : pluginsService.nodeModules()) { modules.add(pluginModule); } + final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool); modules.add(new PluginsModule(pluginsService)); SettingsModule settingsModule = new SettingsModule(this.settings); modules.add(settingsModule); @@ -232,7 +235,8 @@ public class Node implements Closeable { modules.add(scriptModule); modules.add(new NodeEnvironmentModule(nodeEnvironment)); modules.add(new ClusterNameModule(this.settings)); - modules.add(new ThreadPoolModule(threadPool)); + final ThreadPoolModule threadPoolModule = new ThreadPoolModule(threadPool); + modules.add(threadPoolModule); modules.add(new DiscoveryModule(this.settings)); modules.add(new ClusterModule(this.settings)); modules.add(new IndicesModule()); @@ -246,11 +250,14 @@ public class Node implements Closeable { modules.add(new AnalysisModule(environment)); pluginsService.processModules(modules); + scriptModule.prepareSettings(settingsModule); + + threadPoolModule.prepareSettings(settingsModule); + injector = modules.createInjector(); client = injector.getInstance(Client.class); - threadPool.setClusterSettings(injector.getInstance(ClusterSettings.class)); success = true; } catch (IOException ex) { throw new ElasticsearchException("failed to bind service", ex); diff --git a/core/src/main/java/org/elasticsearch/plugins/Plugin.java b/core/src/main/java/org/elasticsearch/plugins/Plugin.java index 1efc151836d..0283567bf80 100644 --- a/core/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -23,9 
+23,12 @@ import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.threadpool.ThreadPool; import java.util.Collection; import java.util.Collections; +import java.util.List; /** * An extension point allowing to plug in custom functionality. @@ -80,4 +83,15 @@ public abstract class Plugin { */ @Deprecated public final void onModule(IndexModule indexModule) {} + + /** + * Provides the list of this plugin's custom thread pools, empty if + * none. + * + * @param settings the current settings + * @return executors builders for this plugin's custom thread pools + */ + public List> getExecutorBuilders(Settings settings) { + return Collections.emptyList(); + } } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java index f373da6987d..4f123625387 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.threadpool.ExecutorBuilder; import java.io.IOException; import java.lang.reflect.InvocationTargetException; @@ -261,6 +262,14 @@ public class PluginsService extends AbstractComponent { return modules; } + public List> getExecutorBuilders(Settings settings) { + final ArrayList> builders = new ArrayList<>(); + for (final Tuple plugin : plugins) { + builders.addAll(plugin.v2().getExecutorBuilders(settings)); + } + return builders; + } + public Collection> nodeServices() { List> services = new ArrayList<>(); for (Tuple plugin : 
plugins) { diff --git a/core/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java b/core/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java new file mode 100644 index 00000000000..434e6fc509c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.threadpool; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +import java.util.List; + +/** + * Base class for executor builders. + * + * @param the underlying type of the executor settings + */ +public abstract class ExecutorBuilder { + + private final String name; + + public ExecutorBuilder(String name) { + this.name = name; + } + + protected String name() { + return name; + } + + protected static String settingsKey(final String prefix, final String key) { + return String.join(".", prefix, key); + } + + /** + * The list of settings this builder will register. 
+ * + * @return the list of registered settings + */ + abstract List> getRegisteredSettings(); + + /** + * Return an executor settings object from the node-level settings. + * + * @param settings the node-level settings + * @return the executor settings object + */ + abstract U getSettings(Settings settings); + + /** + * Builds the executor with the specified executor settings. + * + * @param settings the executor settings + * @param threadContext the current thread context + * @return a new executor built from the specified executor settings + */ + abstract ThreadPool.ExecutorHolder build(U settings, ThreadContext threadContext); + + /** + * Format the thread pool info object for this executor. + * + * @param info the thread pool info object to format + * @return a formatted thread pool info (useful for logging) + */ + abstract String formatInfo(ThreadPool.Info info); + + static abstract class ExecutorSettings { + + protected final String nodeName; + + public ExecutorSettings(String nodeName) { + this.nodeName = nodeName; + } + + } + +} diff --git a/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java b/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java new file mode 100644 index 00000000000..0735774d972 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.threadpool; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.SizeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.node.Node; + +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.Executor; +import java.util.concurrent.ThreadFactory; + +/** + * A builder for fixed executors. + */ +public final class FixedExecutorBuilder extends ExecutorBuilder { + + private final Setting sizeSetting; + private final Setting queueSizeSetting; + + /** + * Construct a fixed executor builder; the settings will have the + * key prefix "thread_pool." followed by the executor name. + * + * @param settings the node-level settings + * @param name the name of the executor + * @param size the fixed number of threads + * @param queueSize the size of the backing queue, -1 for unbounded + */ + FixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize) { + this(settings, name, size, queueSize, "thread_pool." + name); + } + + /** + * Construct a fixed executor builder. 
+ * + * @param settings the node-level settings + * @param name the name of the executor + * @param size the fixed number of threads + * @param queueSize the size of the backing queue, -1 for unbounded + * @param prefix the prefix for the settings keys + */ + public FixedExecutorBuilder(final Settings settings, final String name, final int size, final int queueSize, final String prefix) { + super(name); + final String sizeKey = settingsKey(prefix, "size"); + this.sizeSetting = + new Setting<>( + sizeKey, + s -> Integer.toString(size), + s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey), + Setting.Property.NodeScope); + final String queueSizeKey = settingsKey(prefix, "queue_size"); + this.queueSizeSetting = + Setting.intSetting(queueSizeKey, queueSize, Setting.Property.NodeScope); + } + + private int applyHardSizeLimit(final Settings settings, final String name) { + if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) { + return 1 + EsExecutors.boundedNumberOfProcessors(settings); + } else { + return Integer.MAX_VALUE; + } + } + + @Override + List> getRegisteredSettings() { + return Arrays.asList(sizeSetting, queueSizeSetting); + } + + @Override + FixedExecutorSettings getSettings(Settings settings) { + final String nodeName = Node.NODE_NAME_SETTING.get(settings); + final int size = sizeSetting.get(settings); + final int queueSize = queueSizeSetting.get(settings); + return new FixedExecutorSettings(nodeName, size, queueSize); + } + + @Override + ThreadPool.ExecutorHolder build(final FixedExecutorSettings settings, final ThreadContext threadContext) { + int size = settings.size; + int queueSize = settings.queueSize; + final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name())); + Executor executor = EsExecutors.newFixed(name(), size, queueSize, threadFactory, threadContext); + final ThreadPool.Info info = + new ThreadPool.Info(name(), 
ThreadPool.ThreadPoolType.FIXED, size, size, null, queueSize < 0 ? null : new SizeValue(queueSize)); + return new ThreadPool.ExecutorHolder(executor, info); + } + + @Override + String formatInfo(ThreadPool.Info info) { + return String.format( + Locale.ROOT, + "name [%s], size [%d], queue size [%s]", + info.getName(), + info.getMax(), + info.getQueueSize() == null ? "unbounded" : info.getQueueSize()); + } + + static class FixedExecutorSettings extends ExecutorBuilder.ExecutorSettings { + + private final int size; + private final int queueSize; + + public FixedExecutorSettings(final String nodeName, final int size, final int queueSize) { + super(nodeName); + this.size = size; + this.queueSize = queueSize; + } + + } + +} diff --git a/core/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java b/core/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java new file mode 100644 index 00000000000..68c70c83c19 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java @@ -0,0 +1,129 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.threadpool; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.node.Node; + +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.Executor; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +/** + * A builder for scaling executors. + */ +public final class ScalingExecutorBuilder extends ExecutorBuilder { + + private final Setting coreSetting; + private final Setting maxSetting; + private final Setting keepAliveSetting; + + /** + * Construct a scaling executor builder; the settings will have the + * key prefix "thread_pool." followed by the executor name. + * + * @param name the name of the executor + * @param core the minimum number of threads in the pool + * @param max the maximum number of threads in the pool + * @param keepAlive the time that spare threads above {@code core} + * threads will be kept alive + */ + public ScalingExecutorBuilder(final String name, final int core, final int max, final TimeValue keepAlive) { + this(name, core, max, keepAlive, "thread_pool." + name); + } + + /** + * Construct a scaling executor builder; the settings will have the + * specified key prefix. 
+ * + * @param name the name of the executor + * @param core the minimum number of threads in the pool + * @param max the maximum number of threads in the pool + * @param keepAlive the time that spare threads above {@code core} + * threads will be kept alive + * @param prefix the prefix for the settings keys + */ + public ScalingExecutorBuilder(final String name, final int core, final int max, final TimeValue keepAlive, final String prefix) { + super(name); + this.coreSetting = + Setting.intSetting(settingsKey(prefix, "core"), core, Setting.Property.NodeScope); + this.maxSetting = Setting.intSetting(settingsKey(prefix, "max"), max, Setting.Property.NodeScope); + this.keepAliveSetting = + Setting.timeSetting(settingsKey(prefix, "keep_alive"), keepAlive, Setting.Property.NodeScope); + } + + @Override + List> getRegisteredSettings() { + return Arrays.asList(coreSetting, maxSetting, keepAliveSetting); + } + + @Override + ScalingExecutorSettings getSettings(Settings settings) { + final String nodeName = Node.NODE_NAME_SETTING.get(settings); + final int coreThreads = coreSetting.get(settings); + final int maxThreads = maxSetting.get(settings); + final TimeValue keepAlive = keepAliveSetting.get(settings); + return new ScalingExecutorSettings(nodeName, coreThreads, maxThreads, keepAlive); + } + + ThreadPool.ExecutorHolder build(final ScalingExecutorSettings settings, final ThreadContext threadContext) { + TimeValue keepAlive = settings.keepAlive; + int core = settings.core; + int max = settings.max; + final ThreadPool.Info info = new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.SCALING, core, max, keepAlive, null); + final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name())); + final Executor executor = + EsExecutors.newScaling(name(), core, max, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory, threadContext); + return new ThreadPool.ExecutorHolder(executor, info); + } + + @Override + String 
formatInfo(ThreadPool.Info info) { + return String.format( + Locale.ROOT, + "name [%s], core [%d], max [%d], keep alive [%s]", + info.getName(), + info.getMin(), + info.getMax(), + info.getKeepAlive()); + } + + static class ScalingExecutorSettings extends ExecutorBuilder.ExecutorSettings { + + private final int core; + private final int max; + private final TimeValue keepAlive; + + public ScalingExecutorSettings(final String nodeName, final int core, final int max, final TimeValue keepAlive) { + super(nodeName); + this.core = core; + this.max = max; + this.keepAlive = keepAlive; + } + } + +} diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 0d24bb74e17..5c31323b3d8 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -26,11 +26,8 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.SizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsAbortPolicy; @@ -45,31 +42,22 @@ import org.elasticsearch.node.Node; import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; -import java.util.Queue; -import java.util.concurrent.ConcurrentLinkedQueue; import 
java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionHandler; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.common.unit.SizeValue.parseSizeValue; -import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; -/** - * - */ public class ThreadPool extends AbstractComponent implements Closeable { public static class Names { @@ -146,164 +134,85 @@ public class ThreadPool extends AbstractComponent implements Closeable { THREAD_POOL_TYPES = Collections.unmodifiableMap(map); } - private static void add(Map executorSettings, ExecutorSettingsBuilder builder) { - Settings settings = builder.build(); - String name = settings.get("name"); - executorSettings.put(name, settings); - } - - private static abstract class ExecutorSettingsBuilder> { - - private final Settings.Builder builder; - - protected ExecutorSettingsBuilder(String name, ThreadPoolType threadPoolType) { - if (THREAD_POOL_TYPES.get(name) != threadPoolType) { - throw new IllegalArgumentException("thread pool [" + name + "] must be of type [" + threadPoolType + "]"); - } - builder = Settings.builder(); - builder.put("name", name); - builder.put("type", threadPoolType.getType()); - } - - public T keepAlive(String keepAlive) { - return add("keep_alive", keepAlive); - } - - public T queueSize(int queueSize) { - return add("queue_size", queueSize); - } - - protected T add(String setting, int value) { - return add(setting, Integer.toString(value)); - } - - - protected T add(String setting, String value) { - builder.put(setting, value); - @SuppressWarnings("unchecked") final 
T executor = (T)this; - return executor; - } - - public final Settings build() { return builder.build(); } - - } - - private static class FixedExecutorSettingsBuilder extends ExecutorSettingsBuilder { - - public FixedExecutorSettingsBuilder(String name) { - super(name, ThreadPoolType.FIXED); - } - - public FixedExecutorSettingsBuilder size(int size) { - return add("size", Integer.toString(size)); - } - - } - - private static class ScalingExecutorSettingsBuilder extends ExecutorSettingsBuilder { - - public ScalingExecutorSettingsBuilder(String name) { - super(name, ThreadPoolType.SCALING); - } - - public ScalingExecutorSettingsBuilder min(int min) { - return add("min", min); - } - - - public ScalingExecutorSettingsBuilder size(int size) { - return add("size", size); - } - } - - public static final Setting THREADPOOL_GROUP_SETTING = - Setting.groupSetting("threadpool.", Property.Dynamic, Property.NodeScope); - - private volatile Map executors; - - private final Map defaultExecutorTypeSettings; - - private final Queue retiredExecutors = new ConcurrentLinkedQueue<>(); + private Map executors = new HashMap<>(); private final ScheduledThreadPoolExecutor scheduler; private final EstimatedTimeThread estimatedTimeThread; - private final AtomicBoolean settingsListenerIsSet = new AtomicBoolean(false); - static final Executor DIRECT_EXECUTOR = command -> command.run(); private final ThreadContext threadContext; - public ThreadPool(String name) { - this(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).build()); + private final Map builders; + + public Collection builders() { + return Collections.unmodifiableCollection(builders.values()); } - public ThreadPool(Settings settings) { + public static Setting ESTIMATED_TIME_INTERVAL_SETTING = + Setting.timeSetting("thread_pool.estimated_time_interval", TimeValue.timeValueMillis(200), Setting.Property.NodeScope); + + public ThreadPool(final Settings settings, final ExecutorBuilder... 
customBuilders) { super(settings); - assert Node.NODE_NAME_SETTING.exists(settings) : "ThreadPool's settings should contain a name"; - threadContext = new ThreadContext(settings); - Map groupSettings = THREADPOOL_GROUP_SETTING.get(settings).getAsGroups(); - validate(groupSettings); + assert Node.NODE_NAME_SETTING.exists(settings); - int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); - int halfProcMaxAt5 = halfNumberOfProcessorsMaxFive(availableProcessors); - int halfProcMaxAt10 = halfNumberOfProcessorsMaxTen(availableProcessors); - Map defaultExecutorTypeSettings = new HashMap<>(); - int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512); - add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.GENERIC).min(4).size(genericThreadPoolMax).keepAlive("30s")); - add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.INDEX).size(availableProcessors).queueSize(200)); - add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.BULK).size(availableProcessors).queueSize(50)); - add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.GET).size(availableProcessors).queueSize(1000)); - add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.SEARCH).size(((availableProcessors * 3) / 2) + 1).queueSize(1000)); - add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.MANAGEMENT).min(1).size(5).keepAlive("5m")); + final Map builders = new HashMap<>(); + final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); + final int halfProcMaxAt5 = halfNumberOfProcessorsMaxFive(availableProcessors); + final int halfProcMaxAt10 = halfNumberOfProcessorsMaxTen(availableProcessors); + final int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512); + builders.put(Names.GENERIC, new ScalingExecutorBuilder(Names.GENERIC, 4, genericThreadPoolMax, TimeValue.timeValueSeconds(30))); + builders.put(Names.INDEX, new 
FixedExecutorBuilder(settings, Names.INDEX, availableProcessors, 200)); + builders.put(Names.BULK, new FixedExecutorBuilder(settings, Names.BULK, availableProcessors, 50)); + builders.put(Names.GET, new FixedExecutorBuilder(settings, Names.GET, availableProcessors, 1000)); + builders.put(Names.SEARCH, new FixedExecutorBuilder(settings, Names.SEARCH, ((availableProcessors * 3) / 2) + 1, 1000)); + builders.put(Names.MANAGEMENT, new ScalingExecutorBuilder(Names.MANAGEMENT, 1, 5, TimeValue.timeValueMinutes(5))); // no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded // the assumption here is that the listeners should be very lightweight on the listeners side - add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.LISTENER).size(halfProcMaxAt10)); - add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.FLUSH).min(1).size(halfProcMaxAt5).keepAlive("5m")); - add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.REFRESH).min(1).size(halfProcMaxAt10).keepAlive("5m")); - add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.WARMER).min(1).size(halfProcMaxAt5).keepAlive("5m")); - add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.SNAPSHOT).min(1).size(halfProcMaxAt5).keepAlive("5m")); - add(defaultExecutorTypeSettings, new FixedExecutorSettingsBuilder(Names.FORCE_MERGE).size(1)); - add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.FETCH_SHARD_STARTED).min(1).size(availableProcessors * 2).keepAlive("5m")); - add(defaultExecutorTypeSettings, new ScalingExecutorSettingsBuilder(Names.FETCH_SHARD_STORE).min(1).size(availableProcessors * 2).keepAlive("5m")); - - this.defaultExecutorTypeSettings = unmodifiableMap(defaultExecutorTypeSettings); - - Map executors = new HashMap<>(); - for (Map.Entry executor : defaultExecutorTypeSettings.entrySet()) { - executors.put(executor.getKey(), 
build(executor.getKey(), groupSettings.get(executor.getKey()), executor.getValue())); - } - - // Building custom thread pools - for (Map.Entry entry : groupSettings.entrySet()) { - if (executors.containsKey(entry.getKey())) { - continue; + builders.put(Names.LISTENER, new FixedExecutorBuilder(settings, Names.LISTENER, halfProcMaxAt10, -1)); + builders.put(Names.FLUSH, new ScalingExecutorBuilder(Names.FLUSH, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + builders.put(Names.REFRESH, new ScalingExecutorBuilder(Names.REFRESH, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5))); + builders.put(Names.WARMER, new ScalingExecutorBuilder(Names.WARMER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + builders.put(Names.SNAPSHOT, new ScalingExecutorBuilder(Names.SNAPSHOT, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + builders.put(Names.FETCH_SHARD_STARTED, new ScalingExecutorBuilder(Names.FETCH_SHARD_STARTED, 1, 2 * availableProcessors, TimeValue.timeValueMinutes(5))); + builders.put(Names.FORCE_MERGE, new FixedExecutorBuilder(settings, Names.FORCE_MERGE, 1, -1)); + builders.put(Names.FETCH_SHARD_STORE, new ScalingExecutorBuilder(Names.FETCH_SHARD_STORE, 1, 2 * availableProcessors, TimeValue.timeValueMinutes(5))); + for (final ExecutorBuilder builder : customBuilders) { + if (builders.containsKey(builder.name())) { + throw new IllegalArgumentException("builder with name [" + builder.name() + "] already exists"); } - executors.put(entry.getKey(), build(entry.getKey(), entry.getValue(), Settings.EMPTY)); + builders.put(builder.name(), builder); + } + this.builders = Collections.unmodifiableMap(builders); + + threadContext = new ThreadContext(settings); + + final Map executors = new HashMap<>(); + for (@SuppressWarnings("unchecked") final Map.Entry entry : builders.entrySet()) { + final ExecutorBuilder.ExecutorSettings executorSettings = entry.getValue().getSettings(settings); + final ExecutorHolder executorHolder = entry.getValue().build(executorSettings, 
threadContext); + if (executors.containsKey(executorHolder.info.getName())) { + throw new IllegalStateException("duplicate executors with name [" + executorHolder.info.getName() + "] registered"); + } + logger.debug("created thread pool: " + entry.getValue().formatInfo(executorHolder.info)); + executors.put(entry.getKey(), executorHolder); } executors.put(Names.SAME, new ExecutorHolder(DIRECT_EXECUTOR, new Info(Names.SAME, ThreadPoolType.DIRECT))); this.executors = unmodifiableMap(executors); + this.scheduler = new ScheduledThreadPoolExecutor(1, EsExecutors.daemonThreadFactory(settings, "scheduler"), new EsAbortPolicy()); this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false); this.scheduler.setRemoveOnCancelPolicy(true); - TimeValue estimatedTimeInterval = settings.getAsTime("threadpool.estimated_time_interval", TimeValue.timeValueMillis(200)); + TimeValue estimatedTimeInterval = ESTIMATED_TIME_INTERVAL_SETTING.get(settings); this.estimatedTimeThread = new EstimatedTimeThread(EsExecutors.threadName(settings, "[timer]"), estimatedTimeInterval.millis()); this.estimatedTimeThread.start(); } - public void setClusterSettings(ClusterSettings clusterSettings) { - if(settingsListenerIsSet.compareAndSet(false, true)) { - clusterSettings.addSettingsUpdateConsumer(THREADPOOL_GROUP_SETTING, this::updateSettings, (s) -> validate(s.getAsGroups())); - } else { - throw new IllegalStateException("the node settings listener was set more then once"); - } - } - public long estimatedTimeInMillis() { return estimatedTimeThread.estimatedTimeInMillis(); } @@ -440,12 +349,6 @@ public class ThreadPool extends AbstractComponent implements Closeable { ((ThreadPoolExecutor) executor.executor()).shutdownNow(); } } - - ExecutorHolder holder; - while ((holder = retiredExecutors.poll()) != null) { - ThreadPoolExecutor executor = (ThreadPoolExecutor) holder.executor(); - executor.shutdownNow(); - } } public 
boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { @@ -456,142 +359,10 @@ public class ThreadPool extends AbstractComponent implements Closeable { } } - ExecutorHolder holder; - while ((holder = retiredExecutors.poll()) != null) { - ThreadPoolExecutor executor = (ThreadPoolExecutor) holder.executor(); - result &= executor.awaitTermination(timeout, unit); - } - estimatedTimeThread.join(unit.toMillis(timeout)); return result; } - private ExecutorHolder build(String name, @Nullable Settings settings, Settings defaultSettings) { - return rebuild(name, null, settings, defaultSettings); - } - - private ExecutorHolder rebuild(String name, ExecutorHolder previousExecutorHolder, @Nullable Settings settings, Settings defaultSettings) { - if (Names.SAME.equals(name)) { - // Don't allow to change the "same" thread executor - return previousExecutorHolder; - } - if (settings == null) { - settings = Settings.Builder.EMPTY_SETTINGS; - } - Info previousInfo = previousExecutorHolder != null ? previousExecutorHolder.info : null; - String type = settings.get("type", previousInfo != null ? 
previousInfo.getThreadPoolType().getType() : defaultSettings.get("type")); - ThreadPoolType threadPoolType = ThreadPoolType.fromType(type); - ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, name); - if (ThreadPoolType.DIRECT == threadPoolType) { - if (previousExecutorHolder != null) { - logger.debug("updating thread pool [{}], type [{}]", name, type); - } else { - logger.debug("creating thread pool [{}], type [{}]", name, type); - } - return new ExecutorHolder(DIRECT_EXECUTOR, new Info(name, threadPoolType)); - } else if (ThreadPoolType.FIXED == threadPoolType) { - int defaultSize = defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings)); - SizeValue defaultQueueSize = getAsSizeOrUnbounded(defaultSettings, "queue", getAsSizeOrUnbounded(defaultSettings, "queue_size", null)); - - if (previousExecutorHolder != null) { - assert previousInfo != null; - if (ThreadPoolType.FIXED == previousInfo.getThreadPoolType()) { - SizeValue updatedQueueSize = getAsSizeOrUnbounded(settings, "capacity", getAsSizeOrUnbounded(settings, "queue", getAsSizeOrUnbounded(settings, "queue_size", previousInfo.getQueueSize()))); - if (Objects.equals(previousInfo.getQueueSize(), updatedQueueSize)) { - int updatedSize = applyHardSizeLimit(name, settings.getAsInt("size", previousInfo.getMax())); - if (previousInfo.getMax() != updatedSize) { - logger.debug("updating thread pool [{}], type [{}], size [{}], queue_size [{}]", name, type, updatedSize, updatedQueueSize); - // if you think this code is crazy: that's because it is! 
- if (updatedSize > previousInfo.getMax()) { - ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setMaximumPoolSize(updatedSize); - ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setCorePoolSize(updatedSize); - } else { - ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setCorePoolSize(updatedSize); - ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setMaximumPoolSize(updatedSize); - } - return new ExecutorHolder(previousExecutorHolder.executor(), new Info(name, threadPoolType, updatedSize, updatedSize, null, updatedQueueSize)); - } - return previousExecutorHolder; - } - } - if (previousInfo.getMax() >= 0) { - defaultSize = previousInfo.getMax(); - } - defaultQueueSize = previousInfo.getQueueSize(); - } - - int size = applyHardSizeLimit(name, settings.getAsInt("size", defaultSize)); - SizeValue queueSize = getAsSizeOrUnbounded(settings, "capacity", getAsSizeOrUnbounded(settings, "queue", getAsSizeOrUnbounded(settings, "queue_size", defaultQueueSize))); - logger.debug("creating thread pool [{}], type [{}], size [{}], queue_size [{}]", name, type, size, queueSize); - Executor executor = EsExecutors.newFixed(name, size, queueSize == null ? 
-1 : (int) queueSize.singles(), threadFactory, threadContext); - return new ExecutorHolder(executor, new Info(name, threadPoolType, size, size, null, queueSize)); - } else if (ThreadPoolType.SCALING == threadPoolType) { - TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5)); - int defaultMin = defaultSettings.getAsInt("min", 1); - int defaultSize = defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings)); - final Integer queueSize = settings.getAsInt("queue_size", defaultSettings.getAsInt("queue_size", null)); - if (queueSize != null) { - throw new IllegalArgumentException("thread pool [" + name + "] of type scaling can not have its queue re-sized but was [" + queueSize + "]"); - } - if (previousExecutorHolder != null) { - if (ThreadPoolType.SCALING == previousInfo.getThreadPoolType()) { - TimeValue updatedKeepAlive = settings.getAsTime("keep_alive", previousInfo.getKeepAlive()); - int updatedMin = settings.getAsInt("min", previousInfo.getMin()); - int updatedSize = settings.getAsInt("max", settings.getAsInt("size", previousInfo.getMax())); - if (!previousInfo.getKeepAlive().equals(updatedKeepAlive) || previousInfo.getMin() != updatedMin || previousInfo.getMax() != updatedSize) { - logger.debug("updating thread pool [{}], type [{}], keep_alive [{}]", name, type, updatedKeepAlive); - if (!previousInfo.getKeepAlive().equals(updatedKeepAlive)) { - ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setKeepAliveTime(updatedKeepAlive.millis(), TimeUnit.MILLISECONDS); - } - if (previousInfo.getMin() != updatedMin) { - ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setCorePoolSize(updatedMin); - } - if (previousInfo.getMax() != updatedSize) { - ((EsThreadPoolExecutor) previousExecutorHolder.executor()).setMaximumPoolSize(updatedSize); - } - return new ExecutorHolder(previousExecutorHolder.executor(), new Info(name, threadPoolType, updatedMin, updatedSize, updatedKeepAlive, null)); - } - 
return previousExecutorHolder; - } - if (previousInfo.getKeepAlive() != null) { - defaultKeepAlive = previousInfo.getKeepAlive(); - } - if (previousInfo.getMin() >= 0) { - defaultMin = previousInfo.getMin(); - } - if (previousInfo.getMax() >= 0) { - defaultSize = previousInfo.getMax(); - } - } - TimeValue keepAlive = settings.getAsTime("keep_alive", defaultKeepAlive); - int min = settings.getAsInt("min", defaultMin); - int size = settings.getAsInt("max", settings.getAsInt("size", defaultSize)); - if (previousExecutorHolder != null) { - logger.debug("updating thread pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]", name, type, min, size, keepAlive); - } else { - logger.debug("creating thread pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]", name, type, min, size, keepAlive); - } - Executor executor = EsExecutors.newScaling(name, min, size, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory, threadContext); - return new ExecutorHolder(executor, new Info(name, threadPoolType, min, size, keepAlive, null)); - } - throw new IllegalArgumentException("No type found [" + type + "], for [" + name + "]"); - } - - private int applyHardSizeLimit(String name, int size) { - int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); - if ((name.equals(Names.BULK) || name.equals(Names.INDEX)) && size > availableProcessors) { - // We use a hard max size for the indexing pools, because if too many threads enter Lucene's IndexWriter, it means - // too many segments written, too frequently, too much merging, etc: - // TODO: I would love to be loud here (throw an exception if you ask for a too-big size), but I think this is dangerous - // because on upgrade this setting could be in cluster state and hard for the user to correct? 
- logger.warn("requested thread pool size [{}] for [{}] is too large; setting to maximum [{}] instead", - size, name, availableProcessors); - size = availableProcessors; - } - - return size; - } - /** * Constrains a value between minimum and maximum values * (inclusive). @@ -618,92 +389,6 @@ public class ThreadPool extends AbstractComponent implements Closeable { return boundedBy(2 * numberOfProcessors, 2, Integer.MAX_VALUE); } - private void updateSettings(Settings settings) { - Map groupSettings = settings.getAsGroups(); - if (groupSettings.isEmpty()) { - return; - } - - for (Map.Entry executor : defaultExecutorTypeSettings.entrySet()) { - Settings updatedSettings = groupSettings.get(executor.getKey()); - if (updatedSettings == null) { - continue; - } - - ExecutorHolder oldExecutorHolder = executors.get(executor.getKey()); - ExecutorHolder newExecutorHolder = rebuild(executor.getKey(), oldExecutorHolder, updatedSettings, executor.getValue()); - if (!oldExecutorHolder.equals(newExecutorHolder)) { - Map newExecutors = new HashMap<>(executors); - newExecutors.put(executor.getKey(), newExecutorHolder); - executors = unmodifiableMap(newExecutors); - if (!oldExecutorHolder.executor().equals(newExecutorHolder.executor()) && oldExecutorHolder.executor() instanceof EsThreadPoolExecutor) { - retiredExecutors.add(oldExecutorHolder); - ((EsThreadPoolExecutor) oldExecutorHolder.executor()).shutdown(new ExecutorShutdownListener(oldExecutorHolder)); - } - } - } - - // Building custom thread pools - for (Map.Entry entry : groupSettings.entrySet()) { - if (defaultExecutorTypeSettings.containsKey(entry.getKey())) { - continue; - } - - ExecutorHolder oldExecutorHolder = executors.get(entry.getKey()); - ExecutorHolder newExecutorHolder = rebuild(entry.getKey(), oldExecutorHolder, entry.getValue(), Settings.EMPTY); - // Can't introduce new thread pools at runtime, because The oldExecutorHolder variable will be null in the - // case the settings contains a thread pool not defined in 
the initial settings in the constructor. The if - // statement will then fail and so this prevents the addition of new thread groups at runtime, which is desired. - if (!newExecutorHolder.equals(oldExecutorHolder)) { - Map newExecutors = new HashMap<>(executors); - newExecutors.put(entry.getKey(), newExecutorHolder); - executors = unmodifiableMap(newExecutors); - if (!oldExecutorHolder.executor().equals(newExecutorHolder.executor()) && oldExecutorHolder.executor() instanceof EsThreadPoolExecutor) { - retiredExecutors.add(oldExecutorHolder); - ((EsThreadPoolExecutor) oldExecutorHolder.executor()).shutdown(new ExecutorShutdownListener(oldExecutorHolder)); - } - } - } - } - - private void validate(Map groupSettings) { - for (String key : groupSettings.keySet()) { - if (!THREAD_POOL_TYPES.containsKey(key)) { - continue; - } - String type = groupSettings.get(key).get("type"); - ThreadPoolType correctThreadPoolType = THREAD_POOL_TYPES.get(key); - // TODO: the type equality check can be removed after #3760/#6732 are addressed - if (type != null && !correctThreadPoolType.getType().equals(type)) { - throw new IllegalArgumentException("setting " + THREADPOOL_GROUP_SETTING.getKey() + key + ".type to " + type + " is not permitted; must be " + correctThreadPoolType.getType()); - } - } - } - - /** - * A thread pool size can also be unbounded and is represented by -1, which is not supported by SizeValue (which only supports positive numbers) - */ - private SizeValue getAsSizeOrUnbounded(Settings settings, String setting, SizeValue defaultValue) throws SettingsException { - if ("-1".equals(settings.get(setting))) { - return null; - } - return parseSizeValue(settings.get(setting), defaultValue); - } - - class ExecutorShutdownListener implements EsThreadPoolExecutor.ShutdownListener { - - private ExecutorHolder holder; - - public ExecutorShutdownListener(ExecutorHolder holder) { - this.holder = holder; - } - - @Override - public void onTerminated() { - 
retiredExecutors.remove(holder); - } - } - class LoggingRunnable implements Runnable { private final Runnable runnable; diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolModule.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolModule.java index 9a507f883ac..843febfef8c 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolModule.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolModule.java @@ -20,20 +20,25 @@ package org.elasticsearch.threadpool; import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.settings.SettingsModule; -/** - * - */ public class ThreadPoolModule extends AbstractModule { private final ThreadPool threadPool; - public ThreadPoolModule(ThreadPool threadPool) { + public ThreadPoolModule(final ThreadPool threadPool) { this.threadPool = threadPool; } + public void prepareSettings(SettingsModule settingsModule) { + for (final ExecutorBuilder builder : threadPool.builders()) { + builder.getRegisteredSettings().forEach(settingsModule::registerSetting); + } + } + @Override protected void configure() { bind(ThreadPool.class).toInstance(threadPool); } + } diff --git a/core/src/test/java/org/elasticsearch/action/RejectionActionIT.java b/core/src/test/java/org/elasticsearch/action/RejectionActionIT.java index fb0283db48f..6f100170250 100644 --- a/core/src/test/java/org/elasticsearch/action/RejectionActionIT.java +++ b/core/src/test/java/org/elasticsearch/action/RejectionActionIT.java @@ -45,12 +45,12 @@ public class RejectionActionIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put("threadpool.search.size", 1) - .put("threadpool.search.queue_size", 1) - .put("threadpool.index.size", 1) - .put("threadpool.index.queue_size", 1) - .put("threadpool.get.size", 1) - .put("threadpool.get.queue_size", 1) + .put("thread_pool.search.size", 1) + 
.put("thread_pool.search.queue_size", 1) + .put("thread_pool.index.size", 1) + .put("thread_pool.index.queue_size", 1) + .put("thread_pool.get.size", 1) + .put("thread_pool.get.queue_size", 1) .build(); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index f04c758ef9a..a83a05b8ad8 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -42,6 +42,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.tasks.MockTaskManager; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.local.LocalTransport; @@ -72,7 +73,7 @@ public abstract class TaskManagerTestCase extends ESTestCase { @BeforeClass public static void beforeClass() { - threadPool = new ThreadPool(TransportTasksActionTests.class.getSimpleName()); + threadPool = new TestThreadPool(TransportTasksActionTests.class.getSimpleName()); } @AfterClass diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java index e950da05d37..87249bc8b5a 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java @@ -52,10 +52,10 @@ public class BulkProcessorRetryIT extends ESIntegTestCase { .put(super.nodeSettings(nodeOrdinal)) // don't mess with this one! 
It's quite sensitive to a low queue size // (see also ThreadedActionListener which is happily spawning threads even when we already got rejected) - //.put("threadpool.listener.queue_size", 1) - .put("threadpool.get.queue_size", 1) + //.put("thread_pool.listener.queue_size", 1) + .put("thread_pool.get.queue_size", 1) // default is 50 - .put("threadpool.bulk.queue_size", 30) + .put("thread_pool.bulk.queue_size", 30) .build(); } diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java index 5131ddebca7..908fb2ddd4d 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.junit.After; @@ -63,7 +64,7 @@ public class TransportBulkActionTookTests extends ESTestCase { @BeforeClass public static void beforeClass() { - threadPool = new ThreadPool("TransportBulkActionTookTests"); + threadPool = new TestThreadPool("TransportBulkActionTookTests"); } @AfterClass diff --git a/core/src/test/java/org/elasticsearch/action/support/ListenableActionFutureTests.java b/core/src/test/java/org/elasticsearch/action/support/ListenableActionFutureTests.java index a6c21ca3d66..80492f0be61 100644 --- a/core/src/test/java/org/elasticsearch/action/support/ListenableActionFutureTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/ListenableActionFutureTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.support; import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transports; @@ -31,7 +32,7 @@ import java.util.concurrent.atomic.AtomicReference; public class ListenableActionFutureTests extends ESTestCase { public void testListenerIsCallableFromNetworkThreads() throws Throwable { - ThreadPool threadPool = new ThreadPool("testListenerIsCallableFromNetworkThreads"); + ThreadPool threadPool = new TestThreadPool("testListenerIsCallableFromNetworkThreads"); try { final PlainListenableActionFuture future = new PlainListenableActionFuture<>(threadPool); final CountDownLatch listenerCalled = new CountDownLatch(1); diff --git a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 0fac744625f..07ddfa8e49f 100644 --- a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportResponse; @@ -182,7 +183,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { @BeforeClass public static void startThreadPool() { - THREAD_POOL = new ThreadPool(TransportBroadcastByNodeActionTests.class.getSimpleName()); + THREAD_POOL = 
new TestThreadPool(TransportBroadcastByNodeActionTests.class.getSimpleName()); } @Before diff --git a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index b92ba64c2fb..d6edb972ef4 100644 --- a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportService; @@ -76,7 +77,7 @@ public class TransportMasterNodeActionTests extends ESTestCase { @BeforeClass public static void beforeClass() { - threadPool = new ThreadPool("TransportMasterNodeActionTests"); + threadPool = new TestThreadPool("TransportMasterNodeActionTests"); } @Override diff --git a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index c53adb08ce2..1b0a6f8d622 100644 --- a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.TransportService; import org.junit.After; @@ -162,7 +163,7 @@ public class TransportNodesActionTests extends ESTestCase { @BeforeClass public static void startThreadPool() { - THREAD_POOL = new ThreadPool(TransportBroadcastByNodeActionTests.class.getSimpleName()); + THREAD_POOL = new TestThreadPool(TransportBroadcastByNodeActionTests.class.getSimpleName()); } @AfterClass diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index ebaa5b5c01e..ea4e55c02f4 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.local.LocalTransport; @@ -80,7 +81,7 @@ public class BroadcastReplicationTests extends ESTestCase { @BeforeClass public static void beforeClass() { - threadPool = new ThreadPool("BroadcastReplicationTests"); + threadPool = new TestThreadPool("BroadcastReplicationTests"); circuitBreakerService = new NoneCircuitBreakerService(); } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index afa72ec7526..8ee2499d040 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ 
b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -57,6 +57,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESAllocationTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -109,7 +110,7 @@ public class TransportReplicationActionTests extends ESTestCase { @BeforeClass public static void beforeClass() { - threadPool = new ThreadPool("ShardReplicationTests"); + threadPool = new TestThreadPool("ShardReplicationTests"); } @Override diff --git a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index 10132af1b1d..6d7ac6128c9 100644 --- a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -43,6 +43,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportException; @@ -133,7 +134,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { @BeforeClass public static void startThreadPool() { - THREAD_POOL = new ThreadPool(TransportInstanceSingleOperationActionTests.class.getSimpleName()); + 
THREAD_POOL = new TestThreadPool(TransportInstanceSingleOperationActionTests.class.getSimpleName()); } @Before diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index b566d764231..0dddb301444 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BaseTransportResponseHandler; import org.elasticsearch.transport.TransportException; @@ -63,7 +64,7 @@ public class TransportClientNodesServiceTests extends ESTestCase { TestIteration() { ClusterName clusterName = new ClusterName("test"); - threadPool = new ThreadPool("transport-client-nodes-service-tests"); + threadPool = new TestThreadPool("transport-client-nodes-service-tests"); transport = new FailAndRetryMockTransport(random(), clusterName) { @Override public List getLocalAddresses() { diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index 261da3e2bb8..bfc3929dd14 100644 --- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.test.ESTestCase; import 
org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.NodeDisconnectedException; import org.elasticsearch.transport.NodeNotConnectedException; @@ -97,7 +98,7 @@ public class ShardStateActionTests extends ESTestCase { @BeforeClass public static void startThreadPool() { - THREAD_POOL = new ThreadPool("ShardStateActionTest"); + THREAD_POOL = new TestThreadPool("ShardStateActionTest"); } @Override diff --git a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java index dde35e33993..e99c862bfec 100644 --- a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.gateway.NoopGatewayAllocator; import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.junit.After; @@ -72,7 +73,7 @@ public class ClusterStateHealthTests extends ESTestCase { @BeforeClass public static void beforeClass() { - threadPool = new ThreadPool("ClusterStateHealthTests"); + threadPool = new TestThreadPool("ClusterStateHealthTests"); } @Override diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java index 630e5b034af..2e1cdea56a4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.test.ESAllocationTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -73,7 +74,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { @Before public void createDelayedAllocationService() { - threadPool = new ThreadPool(getTestName()); + threadPool = new TestThreadPool(getTestName()); clusterService = mock(ClusterService.class); allocationService = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator()); delayedAllocationService = new TestDelayAllocationService(Settings.EMPTY, threadPool, clusterService, allocationService); diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java index 81fc9d3752d..bf85f67e48e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.AfterClass; @@ -76,7 +77,7 @@ public class ClusterServiceTests extends ESTestCase { @BeforeClass public static void createThreadPool() { - threadPool = new ThreadPool(ClusterServiceTests.class.getName()); + threadPool = new TestThreadPool(ClusterServiceTests.class.getName()); } @AfterClass 
diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java index 50b7d5f775c..df51e6e2e0d 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -244,7 +245,7 @@ public class PrioritizedExecutorsTests extends ESTestCase { } public void testTimeoutCleanup() throws Exception { - ThreadPool threadPool = new ThreadPool("test"); + ThreadPool threadPool = new TestThreadPool("test"); final ScheduledThreadPoolExecutor timer = (ScheduledThreadPoolExecutor) threadPool.scheduler(); final AtomicBoolean timeoutCalled = new AtomicBoolean(); PrioritizedEsThreadPoolExecutor executor = EsExecutors.newSinglePrioritizing(getTestName(), EsExecutors.daemonThreadFactory(getTestName()), holder); diff --git a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java index d3da4513d95..ea81e958342 100644 --- a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportRequestOptions; @@ -85,7 +86,7 @@ public class ZenFaultDetectionTests extends ESTestCase { .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), new ByteSizeValue(0)) .build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - threadPool = new ThreadPool(getClass().getName()); + threadPool = new TestThreadPool(getClass().getName()); clusterServiceA = createClusterService(threadPool); clusterServiceB = createClusterService(threadPool); circuitBreakerService = new HierarchyCircuitBreakerService(settings, clusterSettings); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 651eab421ad..b7db9f9e609 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -43,6 +43,7 @@ import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.membership.MembershipAction; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.AfterClass; @@ -81,7 +82,7 @@ public class NodeJoinControllerTests extends ESTestCase { @BeforeClass public static void beforeClass() { - threadPool = new ThreadPool("ShardReplicationTests"); + threadPool = new TestThreadPool("ShardReplicationTests"); } @AfterClass diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java 
b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java index 7847d7027d0..2845e6a5bf0 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java @@ -37,6 +37,7 @@ import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportService; @@ -60,7 +61,7 @@ public class UnicastZenPingIT extends ESTestCase { int endPort = startPort + 10; settings = Settings.builder().put(settings).put(TransportSettings.PORT.getKey(), startPort + "-" + endPort).build(); - ThreadPool threadPool = new ThreadPool(getClass().getName()); + ThreadPool threadPool = new TestThreadPool(getClass().getName()); ClusterName test = new ClusterName("test"); ClusterName mismatch = new ClusterName("mismatch"); NetworkService networkService = new NetworkService(settings); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index e6b160eabf8..7cc319c8579 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BytesTransportRequest; import org.elasticsearch.transport.TransportChannel; @@ -217,7 +218,7 @@ public class PublishClusterStateActionTests extends ESTestCase { @Before public void setUp() throws Exception { super.setUp(); - threadPool = new ThreadPool(getClass().getName()); + threadPool = new TestThreadPool(getClass().getName()); } @Override diff --git a/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java b/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java index b710aa50ee0..9ce2aa44ab6 100644 --- a/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -60,7 +61,7 @@ public class AsyncShardFetchTests extends ESTestCase { @Before public void setUp() throws Exception { super.setUp(); - this.threadPool = new ThreadPool(getTestName()); + this.threadPool = new TestThreadPool(getTestName()); this.test = new TestFetch(threadPool); } diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpChannelTests.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpChannelTests.java index e7e3c41820b..a56e9993434 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpChannelTests.java +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpChannelTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import 
org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffers; @@ -70,7 +71,7 @@ public class NettyHttpChannelTests extends ESTestCase { @Before public void setup() throws Exception { networkService = new NetworkService(Settings.EMPTY); - threadPool = new ThreadPool("test"); + threadPool = new TestThreadPool("test"); bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()); } diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTests.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTests.java index db48a56a122..6fc9a4e674a 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTests.java +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.http.netty.pipelining.OrderedDownstreamChannelEvent; import org.elasticsearch.http.netty.pipelining.OrderedUpstreamMessageEvent; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffers; @@ -73,7 +74,7 @@ public class NettyHttpServerPipeliningTests extends ESTestCase { @Before public void setup() throws Exception { networkService = new NetworkService(Settings.EMPTY); - threadPool = new ThreadPool("test"); + threadPool = new TestThreadPool("test"); bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()); } diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerTransportTests.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerTransportTests.java index da548c93985..3cf9c1aa029 
100644 --- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerTransportTests.java +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerTransportTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.http.netty.cors.CorsConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.jboss.netty.handler.codec.http.HttpMethod; import org.junit.After; @@ -54,7 +55,7 @@ public class NettyHttpServerTransportTests extends ESTestCase { @Before public void setup() throws Exception { networkService = new NetworkService(Settings.EMPTY); - threadPool = new ThreadPool("test"); + threadPool = new TestThreadPool("test"); bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()); } diff --git a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 38b7341cd24..f552ab9cf17 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -76,6 +76,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.TestSearchContext; import org.elasticsearch.test.engine.MockEngineFactory; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; @@ -110,7 +111,7 @@ public class IndexModuleTests extends ESTestCase { static NodeServicesProvider newNodeServiceProvider(Settings settings, Environment environment, Client client, ScriptEngineService... 
scriptEngineServices) throws IOException { // TODO this can be used in other place too - lets first refactor the IndicesQueriesRegistry - ThreadPool threadPool = new ThreadPool("test"); + ThreadPool threadPool = new TestThreadPool("test"); CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); BigArrays bigArrays = new BigArrays(settings, circuitBreakerService); Set scriptEngines = Collections.emptySet(); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index b7da3860003..05428ee3cd5 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -98,6 +98,7 @@ import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.MatcherAssert; import org.junit.After; @@ -173,7 +174,7 @@ public class InternalEngineTests extends ESTestCase { .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD, between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) .build()); // TODO randomize more settings - threadPool = new ThreadPool(getClass().getName()); + threadPool = new TestThreadPool(getClass().getName()); store = createStore(); storeReplica = createStore(); Lucene.cleanLuceneIndex(store.directory()); diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index bed597b7cc5..ef443d1e102 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -67,6 
+67,7 @@ import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.MatcherAssert; import org.junit.After; @@ -126,7 +127,7 @@ public class ShadowEngineTests extends ESTestCase { .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .build()); // TODO randomize more settings - threadPool = new ThreadPool(getClass().getName()); + threadPool = new TestThreadPool(getClass().getName()); dirPath = createTempDir(); store = createStore(dirPath); storeReplica = createStore(dirPath); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java index 2887d9b2561..bd6decfb60e 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; @@ -177,7 +178,7 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase { } private void doTestRequireDocValues(MappedFieldType ft) { - ThreadPool threadPool = new ThreadPool("random_threadpool_name"); + ThreadPool threadPool = new TestThreadPool("random_threadpool_name"); try { IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null); IndexFieldDataService ifds = new IndexFieldDataService(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), cache, null, 
null); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java index 26399218829..8960f9dfe6e 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.local.LocalTransport; @@ -64,7 +65,7 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase { @BeforeClass public static void createThreadPool() { - THREAD_POOL = new ThreadPool("DynamicMappingDisabledTests"); + THREAD_POOL = new TestThreadPool("DynamicMappingDisabledTests"); } @Override diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java index f877ae6629b..f197073033d 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.junit.After; @@ -74,7 +75,7 @@ public class IndicesStoreTests extends ESTestCase { @BeforeClass public static 
void beforeClass() { - threadPool = new ThreadPool("ShardReplicationTests"); + threadPool = new TestThreadPool("ShardReplicationTests"); } @AfterClass diff --git a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java index 97a2b065627..a1f4d381911 100644 --- a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.monitor.jvm; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import java.util.AbstractMap; @@ -128,7 +129,7 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase { assert constructionShouldFail == (asserts == null); ThreadPool threadPool = null; try { - threadPool = new ThreadPool(JvmGcMonitorServiceSettingsTests.class.getCanonicalName()) { + threadPool = new TestThreadPool(JvmGcMonitorServiceSettingsTests.class.getCanonicalName()) { @Override public ScheduledFuture scheduleWithFixedDelay(Runnable command, TimeValue interval) { return scheduler.apply(command, interval); diff --git a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java index 5e1dc740f9e..e0eed8387a6 100644 --- a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java @@ -55,9 +55,10 @@ public class NativeScriptTests extends ESTestCase { ScriptModule scriptModule = new ScriptModule(); scriptModule.prepareSettings(settingsModule); scriptModule.registerScript("my", MyNativeScriptFactory.class); + final ThreadPool threadPool = new 
ThreadPool(settings); Injector injector = new ModulesBuilder().add( new EnvironmentModule(new Environment(settings)), - new ThreadPoolModule(new ThreadPool(settings)), + new ThreadPoolModule(threadPool), new SettingsModule(settings), scriptModule).createInjector(); diff --git a/core/src/test/java/org/elasticsearch/search/SearchWithRejectionsIT.java b/core/src/test/java/org/elasticsearch/search/SearchWithRejectionsIT.java index 68b4d980928..2bb39ad10ea 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchWithRejectionsIT.java +++ b/core/src/test/java/org/elasticsearch/search/SearchWithRejectionsIT.java @@ -36,8 +36,8 @@ public class SearchWithRejectionsIT extends ESIntegTestCase { @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put("threadpool.search.size", 1) - .put("threadpool.search.queue_size", 1) + .put("thread_pool.search.size", 1) + .put("thread_pool.search.queue_size", 1) .build(); } diff --git a/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java b/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java index 85daebb86d5..48ea8b6c8c9 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.threadpool; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -42,8 +41,8 @@ public class FixedThreadPoolTests extends ESThreadPoolTestCase { final Settings nodeSettings = Settings.builder() .put("node.name", "testRejectedExecutionCounter") - .put("threadpool." + threadPoolName + ".size", size) - .put("threadpool." + threadPoolName + ".queue_size", queueSize) + .put("thread_pool." 
+ threadPoolName + ".size", size) + .put("thread_pool." + threadPoolName + ".queue_size", queueSize) .build(); try { threadPool = new ThreadPool(nodeSettings); @@ -86,18 +85,6 @@ public class FixedThreadPoolTests extends ESThreadPoolTestCase { assertThat(counter, equalTo(rejections)); assertThat(stats(threadPool, threadPoolName).getRejected(), equalTo(rejections)); - - // the rejected execution count resets to zero when the - // queue is resized - final ClusterSettings clusterSettings = - new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - threadPool.setClusterSettings(clusterSettings); - clusterSettings.applySettings( - Settings.builder() - .put("threadpool." + threadPoolName + ".queue_size", queueSize + 1) - .build()); - assertThat(stats(threadPool, threadPoolName).getRejected(), equalTo(0L)); - } finally { terminateThreadPoolIfNeeded(threadPool); } diff --git a/core/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java b/core/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java index 2212f162eb6..d065abb884c 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java @@ -40,35 +40,35 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase { final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final Settings.Builder builder = Settings.builder(); - final int min; + final int core; if (randomBoolean()) { - min = randomIntBetween(0, 8); - builder.put("threadpool." + threadPoolName + ".min", min); + core = randomIntBetween(0, 8); + builder.put("thread_pool." + threadPoolName + ".core", core); } else { - min = "generic".equals(threadPoolName) ? 4 : 1; // the defaults + core = "generic".equals(threadPoolName) ? 
4 : 1; // the defaults } - final int sizeBasedOnNumberOfProcessors; + final int maxBasedOnNumberOfProcessors; if (randomBoolean()) { final int processors = randomIntBetween(1, 64); - sizeBasedOnNumberOfProcessors = expectedSize(threadPoolName, processors); + maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, processors); builder.put("processors", processors); } else { - sizeBasedOnNumberOfProcessors = expectedSize(threadPoolName, Math.min(32, Runtime.getRuntime().availableProcessors())); + maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, Math.min(32, Runtime.getRuntime().availableProcessors())); } - final int expectedSize; - if (sizeBasedOnNumberOfProcessors < min || randomBoolean()) { - expectedSize = randomIntBetween(Math.max(1, min), 16); - builder.put("threadpool." + threadPoolName + ".size", expectedSize); + final int expectedMax; + if (maxBasedOnNumberOfProcessors < core || randomBoolean()) { + expectedMax = randomIntBetween(Math.max(1, core), 16); + builder.put("thread_pool." + threadPoolName + ".max", expectedMax); } else { - expectedSize = sizeBasedOnNumberOfProcessors; + expectedMax = maxBasedOnNumberOfProcessors; } final long keepAlive; if (randomBoolean()) { keepAlive = randomIntBetween(1, 300); - builder.put("threadpool." + threadPoolName + ".keep_alive", keepAlive + "s"); + builder.put("thread_pool." + threadPoolName + ".keep_alive", keepAlive + "s"); } else { keepAlive = "generic".equals(threadPoolName) ? 
30 : 300; // the defaults } @@ -88,10 +88,10 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase { assertNull(info.getQueueSize()); assertThat(esThreadPoolExecutor.getQueue().remainingCapacity(), equalTo(Integer.MAX_VALUE)); - assertThat(info.getMin(), equalTo(min)); - assertThat(esThreadPoolExecutor.getCorePoolSize(), equalTo(min)); - assertThat(info.getMax(), equalTo(expectedSize)); - assertThat(esThreadPoolExecutor.getMaximumPoolSize(), equalTo(expectedSize)); + assertThat(info.getMin(), equalTo(core)); + assertThat(esThreadPoolExecutor.getCorePoolSize(), equalTo(core)); + assertThat(info.getMax(), equalTo(expectedMax)); + assertThat(esThreadPoolExecutor.getMaximumPoolSize(), equalTo(expectedMax)); }); } @@ -113,23 +113,10 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase { return sizes.get(threadPoolName).size(numberOfProcessors); } - public void testValidDynamicKeepAlive() throws InterruptedException { - final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); - runScalingThreadPoolTest(Settings.EMPTY, (clusterSettings, threadPool) -> { - final Executor beforeExecutor = threadPool.executor(threadPoolName); - final long seconds = randomIntBetween(1, 300); - clusterSettings.applySettings(settings("threadpool." + threadPoolName + ".keep_alive", seconds + "s")); - final Executor afterExecutor = threadPool.executor(threadPoolName); - assertSame(beforeExecutor, afterExecutor); - final ThreadPool.Info info = info(threadPool, threadPoolName); - assertThat(info.getKeepAlive().seconds(), equalTo(seconds)); - }); - } - public void testScalingThreadPoolIsBounded() throws InterruptedException { final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final int size = randomIntBetween(32, 512); - final Settings settings = Settings.builder().put("threadpool." + threadPoolName + ".size", size).build(); + final Settings settings = Settings.builder().put("thread_pool." 
+ threadPoolName + ".max", size).build(); runScalingThreadPoolTest(settings, (clusterSettings, threadPool) -> { final CountDownLatch latch = new CountDownLatch(1); final int numberOfTasks = 2 * size; @@ -161,8 +148,8 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase { final int min = "generic".equals(threadPoolName) ? 4 : 1; final Settings settings = Settings.builder() - .put("threadpool." + threadPoolName + ".size", 128) - .put("threadpool." + threadPoolName + ".keep_alive", "1ms") + .put("thread_pool." + threadPoolName + ".max", 128) + .put("thread_pool." + threadPoolName + ".keep_alive", "1ms") .build(); runScalingThreadPoolTest(settings, ((clusterSettings, threadPool) -> { final CountDownLatch latch = new CountDownLatch(1); @@ -197,40 +184,6 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase { })); } - public void testDynamicThreadPoolSize() throws InterruptedException { - final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); - runScalingThreadPoolTest(Settings.EMPTY, (clusterSettings, threadPool) -> { - final Executor beforeExecutor = threadPool.executor(threadPoolName); - int expectedMin = "generic".equals(threadPoolName) ? 4 : 1; - final int size = randomIntBetween(expectedMin, Integer.MAX_VALUE); - clusterSettings.applySettings(settings("threadpool." 
+ threadPoolName + ".size", size)); - final Executor afterExecutor = threadPool.executor(threadPoolName); - assertSame(beforeExecutor, afterExecutor); - final ThreadPool.Info info = info(threadPool, threadPoolName); - assertThat(info.getMin(), equalTo(expectedMin)); - assertThat(info.getMax(), equalTo(size)); - - assertThat(afterExecutor, instanceOf(EsThreadPoolExecutor.class)); - final EsThreadPoolExecutor executor = (EsThreadPoolExecutor)afterExecutor; - assertThat(executor.getCorePoolSize(), equalTo(expectedMin)); - assertThat(executor.getMaximumPoolSize(), equalTo(size)); - }); - } - - public void testResizingScalingThreadPoolQueue() throws InterruptedException { - final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); - runScalingThreadPoolTest(Settings.EMPTY, (clusterSettings, threadPool) -> { - final int size = randomIntBetween(1, Integer.MAX_VALUE); - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> clusterSettings.applySettings(settings("threadpool." 
+ threadPoolName + ".queue_size", size))); - assertThat(e, hasToString( - "java.lang.IllegalArgumentException: thread pool [" + threadPoolName + - "] of type scaling can not have its queue re-sized but was [" + - size + "]")); - }); - } - public void runScalingThreadPoolTest( final Settings settings, final BiConsumer consumer) throws InterruptedException { @@ -240,7 +193,6 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase { final Settings nodeSettings = Settings.builder().put(settings).put("node.name", test).build(); threadPool = new ThreadPool(nodeSettings); final ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - threadPool.setClusterSettings(clusterSettings); consumer.accept(clusterSettings, threadPool); } finally { terminateThreadPoolIfNeeded(threadPool); diff --git a/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index baf017bc7b9..28267e9beb7 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -125,63 +125,6 @@ public class SimpleThreadPoolIT extends ESIntegTestCase { } } - public void testUpdatingThreadPoolSettings() throws Exception { - internalCluster().startNodesAsync(2).get(); - ThreadPool threadPool = internalCluster().getDataNodeInstance(ThreadPool.class); - // Check that settings are changed - assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getQueue().remainingCapacity(), equalTo(1000)); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("threadpool.search.queue_size", 2000).build()).execute().actionGet(); - assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getQueue().remainingCapacity(), equalTo(2000)); - - // Make sure that threads continue executing when executor is replaced - 
final CyclicBarrier barrier = new CyclicBarrier(2); - Executor oldExecutor = threadPool.executor(Names.SEARCH); - threadPool.executor(Names.SEARCH).execute(() -> { - try { - barrier.await(); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } catch (BrokenBarrierException ex) { - // - } - }); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("threadpool.search.queue_size", 1000).build()).execute().actionGet(); - assertThat(threadPool.executor(Names.SEARCH), not(sameInstance(oldExecutor))); - assertThat(((ThreadPoolExecutor) oldExecutor).isShutdown(), equalTo(true)); - assertThat(((ThreadPoolExecutor) oldExecutor).isTerminating(), equalTo(true)); - assertThat(((ThreadPoolExecutor) oldExecutor).isTerminated(), equalTo(false)); - barrier.await(10, TimeUnit.SECONDS); - - // Make sure that new thread executor is functional - threadPool.executor(Names.SEARCH).execute(() -> { - try { - barrier.await(); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } catch (BrokenBarrierException ex) { - // - } - } - ); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("threadpool.search.queue_size", 500)).execute().actionGet(); - barrier.await(10, TimeUnit.SECONDS); - - // Check that node info is correct - NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().all().execute().actionGet(); - assertEquals(2, nodesInfoResponse.getNodes().size()); - for (NodeInfo nodeInfo : nodesInfoResponse.getNodes()) { - boolean found = false; - for (ThreadPool.Info info : nodeInfo.getThreadPool()) { - if (info.getName().equals(Names.SEARCH)) { - assertEquals(info.getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); - found = true; - break; - } - } - assertThat(found, equalTo(true)); - } - } - public void testThreadPoolLeakingThreadsWithTribeNode() { Settings settings = Settings.builder() .put("node.name", 
"thread_pool_leaking_threads_tribe_node") diff --git a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java index 19dd25a7e25..486b0635c64 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java @@ -96,7 +96,7 @@ public class ThreadPoolSerializationTests extends ESTestCase { } public void testThatNegativeSettingAllowsToStart() throws InterruptedException { - Settings settings = Settings.builder().put("node.name", "index").put("threadpool.index.queue_size", "-1").build(); + Settings settings = Settings.builder().put("node.name", "index").put("thread_pool.index.queue_size", "-1").build(); ThreadPool threadPool = new ThreadPool(settings); assertThat(threadPool.info("index").getQueueSize(), is(nullValue())); terminate(threadPool); diff --git a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java index 43e8e7e7af5..87accf057ad 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -19,32 +19,24 @@ package org.elasticsearch.threadpool; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.threadpool.ThreadPool.Names; import java.lang.reflect.Field; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.Executor; import 
java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.sameInstance; -/** - */ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { public void testCorrectThreadPoolTypePermittedInSettings() throws InterruptedException { @@ -53,12 +45,12 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { ThreadPool threadPool = null; try { threadPool = new ThreadPool(Settings.builder() - .put("node.name", "testCorrectThreadPoolTypePermittedInSettings") - .put("threadpool." + threadPoolName + ".type", correctThreadPoolType.getType()) - .build()); + .put("node.name", "testCorrectThreadPoolTypePermittedInSettings") + .put("thread_pool." 
+ threadPoolName + ".type", correctThreadPoolType.getType()) + .build()); ThreadPool.Info info = info(threadPool, threadPoolName); if (ThreadPool.Names.SAME.equals(threadPoolName)) { - assertNull(info); // we don't report on the "same" threadpool + assertNull(info); // we don't report on the "same" thread pool } else { // otherwise check we have the expected type assertEquals(info.getThreadPoolType(), correctThreadPoolType); @@ -68,97 +60,31 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { } } - public void testThreadPoolCanNotOverrideThreadPoolType() throws InterruptedException { - String threadPoolName = randomThreadPoolName(); - ThreadPool.ThreadPoolType incorrectThreadPoolType = randomIncorrectThreadPoolType(threadPoolName); - ThreadPool.ThreadPoolType correctThreadPoolType = ThreadPool.THREAD_POOL_TYPES.get(threadPoolName); - ThreadPool threadPool = null; - try { - threadPool = new ThreadPool( - Settings.builder() - .put("node.name", "testThreadPoolCanNotOverrideThreadPoolType") - .put("threadpool." + threadPoolName + ".type", incorrectThreadPoolType.getType()) - .build()); - terminate(threadPool); - fail("expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertThat( - e.getMessage(), - is("setting threadpool." 
+ threadPoolName + ".type to " + incorrectThreadPoolType.getType() + " is not permitted; must be " + correctThreadPoolType.getType())); - } finally { - terminateThreadPoolIfNeeded(threadPool); - } - } - public void testIndexingThreadPoolsMaxSize() throws InterruptedException { - String threadPoolName = randomThreadPoolName(); - for (String name : new String[] {ThreadPool.Names.BULK, ThreadPool.Names.INDEX}) { - ThreadPool threadPool = null; - try { + final String name = randomFrom(Names.BULK, Names.INDEX); + final int maxSize = 1 + EsExecutors.boundedNumberOfProcessors(Settings.EMPTY); + final int tooBig = randomIntBetween(1 + maxSize, Integer.MAX_VALUE); - int maxSize = EsExecutors.boundedNumberOfProcessors(Settings.EMPTY); + // try to create a too big thread pool + final IllegalArgumentException initial = + expectThrows( + IllegalArgumentException.class, + () -> { + ThreadPool tp = null; + try { + tp = new ThreadPool(Settings.builder() + .put("node.name", "testIndexingThreadPoolsMaxSize") + .put("thread_pool." + name + ".size", tooBig) + .build()); + } finally { + terminateThreadPoolIfNeeded(tp); + } + }); - // try to create a too-big (maxSize+1) thread pool - threadPool = new ThreadPool(Settings.builder() - .put("node.name", "testIndexingThreadPoolsMaxSize") - .put("threadpool." + name + ".size", maxSize+1) - .build()); - - // confirm it clipped us at the maxSize: - assertEquals(maxSize, ((ThreadPoolExecutor) threadPool.executor(name)).getMaximumPoolSize()); - - ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - threadPool.setClusterSettings(clusterSettings); - - // update it to a tiny size: - clusterSettings.applySettings( - Settings.builder() - .put("threadpool." 
+ name + ".size", 1) - .build() - ); - - // confirm it worked: - assertEquals(1, ((ThreadPoolExecutor) threadPool.executor(name)).getMaximumPoolSize()); - - // try to update to too-big size: - clusterSettings.applySettings( - Settings.builder() - .put("threadpool." + name + ".size", maxSize+1) - .build() - ); - - // confirm it clipped us at the maxSize: - assertEquals(maxSize, ((ThreadPoolExecutor) threadPool.executor(name)).getMaximumPoolSize()); - } finally { - terminateThreadPoolIfNeeded(threadPool); - } - } - } - - public void testUpdateSettingsCanNotChangeThreadPoolType() throws InterruptedException { - String threadPoolName = randomThreadPoolName(); - ThreadPool.ThreadPoolType invalidThreadPoolType = randomIncorrectThreadPoolType(threadPoolName); - ThreadPool.ThreadPoolType validThreadPoolType = ThreadPool.THREAD_POOL_TYPES.get(threadPoolName); - ThreadPool threadPool = null; - try { - threadPool = new ThreadPool(Settings.builder().put("node.name", "testUpdateSettingsCanNotChangeThreadPoolType").build()); - ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - threadPool.setClusterSettings(clusterSettings); - - clusterSettings.applySettings( - Settings.builder() - .put("threadpool." + threadPoolName + ".type", invalidThreadPoolType.getType()) - .build() - ); - fail("expected IllegalArgumentException"); - } catch (IllegalArgumentException e) { - assertEquals("illegal value can't update [threadpool.] from [{}] to [{" + threadPoolName + ".type=" + invalidThreadPoolType.getType() + "}]", e.getMessage()); - assertThat( - e.getCause().getMessage(), - is("setting threadpool." + threadPoolName + ".type to " + invalidThreadPoolType.getType() + " is not permitted; must be " + validThreadPoolType.getType())); - } finally { - terminateThreadPoolIfNeeded(threadPool); - } + assertThat( + initial, + hasToString(containsString( + "Failed to parse value [" + tooBig + "] for setting [thread_pool." 
+ name + ".size] must be "))); } private static int getExpectedThreadPoolSize(Settings settings, String name, int size) { @@ -174,17 +100,14 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { ThreadPool threadPool = null; try { + int expectedSize = getExpectedThreadPoolSize(Settings.EMPTY, threadPoolName, 15); Settings nodeSettings = Settings.builder() - .put("node.name", "testFixedExecutorType").build(); + .put("node.name", "testFixedExecutorType") + .put("thread_pool." + threadPoolName + ".size", expectedSize) + .build(); threadPool = new ThreadPool(nodeSettings); - ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - threadPool.setClusterSettings(clusterSettings); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - Settings settings = clusterSettings.applySettings(Settings.builder() - .put("threadpool." + threadPoolName + ".size", "15") - .build()); - int expectedSize = getExpectedThreadPoolSize(nodeSettings, threadPoolName, 15); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(expectedSize)); @@ -193,37 +116,6 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize)); // keep alive does not apply to fixed thread pools assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L)); - - // Put old type back - settings = clusterSettings.applySettings(Settings.EMPTY); - assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); - // Make sure keep alive value is not used - assertThat(info(threadPool, 
threadPoolName).getKeepAlive(), nullValue()); - // Make sure keep pool size value were reused - assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedSize)); - assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize)); - assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(expectedSize)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(expectedSize)); - - // Change size - Executor oldExecutor = threadPool.executor(threadPoolName); - settings = clusterSettings.applySettings(Settings.builder().put(settings).put("threadpool." + threadPoolName + ".size", "10").build()); - - expectedSize = getExpectedThreadPoolSize(nodeSettings, threadPoolName, 10); - - // Make sure size values changed - assertThat(info(threadPool, threadPoolName).getMax(), equalTo(expectedSize)); - assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedSize)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(expectedSize)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(expectedSize)); - // Make sure executor didn't change - assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); - assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor)); - - // Change queue capacity - clusterSettings.applySettings(Settings.builder().put(settings).put("threadpool." + threadPoolName + ".queue", "500") - .build()); } finally { terminateThreadPoolIfNeeded(threadPool); } @@ -234,11 +126,10 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { ThreadPool threadPool = null; try { Settings nodeSettings = Settings.builder() - .put("threadpool." 
+ threadPoolName + ".size", 10) - .put("node.name", "testScalingExecutorType").build(); + .put("thread_pool." + threadPoolName + ".max", 10) + .put("node.name", "testScalingExecutorType") + .build(); threadPool = new ThreadPool(nodeSettings); - ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - threadPool.setClusterSettings(clusterSettings); final int expectedMinimum = "generic".equals(threadPoolName) ? 4 : 1; assertThat(info(threadPool, threadPoolName).getMin(), equalTo(expectedMinimum)); assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10)); @@ -246,24 +137,6 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { assertThat(info(threadPool, threadPoolName).getKeepAlive().seconds(), equalTo(expectedKeepAlive)); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - - // Change settings that doesn't require pool replacement - Executor oldExecutor = threadPool.executor(threadPoolName); - clusterSettings.applySettings(Settings.builder() - .put("threadpool." + threadPoolName + ".keep_alive", "10m") - .put("threadpool." + threadPoolName + ".min", "2") - .put("threadpool." 
+ threadPoolName + ".size", "15") - .build()); - assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING); - assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getCorePoolSize(), equalTo(2)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getMaximumPoolSize(), equalTo(15)); - assertThat(info(threadPool, threadPoolName).getMin(), equalTo(2)); - assertThat(info(threadPool, threadPoolName).getMax(), equalTo(15)); - // Make sure keep alive value changed - assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(10L)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L)); - assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor)); } finally { terminateThreadPoolIfNeeded(threadPool); } @@ -274,17 +147,18 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { ThreadPool threadPool = null; try { Settings nodeSettings = Settings.builder() - .put("threadpool." + threadPoolName + ".queue_size", 1000) - .put("node.name", "testShutdownNowInterrupts").build(); + .put("thread_pool." 
+ threadPoolName + ".queue_size", 1000) + .put("node.name", "testShutdownNowInterrupts") + .build(); threadPool = new ThreadPool(nodeSettings); - ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - threadPool.setClusterSettings(clusterSettings); assertEquals(info(threadPool, threadPoolName).getQueueSize().getSingles(), 1000L); + final CountDownLatch shutDownLatch = new CountDownLatch(1); final CountDownLatch latch = new CountDownLatch(1); ThreadPoolExecutor oldExecutor = (ThreadPoolExecutor) threadPool.executor(threadPoolName); threadPool.executor(threadPoolName).execute(() -> { try { + shutDownLatch.countDown(); new CountDownLatch(1).await(); } catch (InterruptedException ex) { latch.countDown(); @@ -292,13 +166,11 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { } } ); - clusterSettings.applySettings(Settings.builder().put("threadpool." + threadPoolName + ".queue_size", 2000).build()); - assertThat(threadPool.executor(threadPoolName), not(sameInstance(oldExecutor))); + shutDownLatch.await(); + threadPool.shutdownNow(); + latch.await(3, TimeUnit.SECONDS); // if this throws then ThreadPool#shutdownNow did not interrupt assertThat(oldExecutor.isShutdown(), equalTo(true)); - assertThat(oldExecutor.isTerminating(), equalTo(true)); - assertThat(oldExecutor.isTerminated(), equalTo(false)); - threadPool.shutdownNow(); // should interrupt the thread - latch.await(3, TimeUnit.SECONDS); // If this throws then ThreadPool#shutdownNow didn't interrupt + assertThat(oldExecutor.isTerminating() || oldExecutor.isTerminated(), equalTo(true)); } finally { terminateThreadPoolIfNeeded(threadPool); } @@ -307,18 +179,19 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { public void testCustomThreadPool() throws Exception { ThreadPool threadPool = null; try { - Settings nodeSettings = Settings.builder() - .put("threadpool.my_pool1.type", "scaling") - 
.put("threadpool.my_pool1.min", 1) - .put("threadpool.my_pool1.size", EsExecutors.boundedNumberOfProcessors(Settings.EMPTY)) - .put("threadpool.my_pool1.keep_alive", "1m") - .put("threadpool.my_pool2.type", "fixed") - .put("threadpool.my_pool2.size", "1") - .put("threadpool.my_pool2.queue_size", "1") - .put("node.name", "testCustomThreadPool").build(); - threadPool = new ThreadPool(nodeSettings); - ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - threadPool.setClusterSettings(clusterSettings); + + + final ScalingExecutorBuilder scaling = + new ScalingExecutorBuilder( + "my_pool1", + 1, + EsExecutors.boundedNumberOfProcessors(Settings.EMPTY), + TimeValue.timeValueMinutes(1)); + + final FixedExecutorBuilder fixed = new FixedExecutorBuilder(Settings.EMPTY, "my_pool2", 1, 1); + + threadPool = new ThreadPool(Settings.builder().put("node.name", "testCustomThreadPool").build(), scaling, fixed); + ThreadPoolInfo groups = threadPool.info(); boolean foundPool1 = false; boolean foundPool2 = false; @@ -345,39 +218,6 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { } assertThat(foundPool1, is(true)); assertThat(foundPool2, is(true)); - - // Updating my_pool2 - Settings settings = Settings.builder() - .put("threadpool.my_pool2.size", "10") - .build(); - clusterSettings.applySettings(settings); - - groups = threadPool.info(); - foundPool1 = false; - foundPool2 = false; - outer: - for (ThreadPool.Info info : groups) { - if ("my_pool1".equals(info.getName())) { - foundPool1 = true; - assertEquals(info.getThreadPoolType(), ThreadPool.ThreadPoolType.SCALING); - } else if ("my_pool2".equals(info.getName())) { - foundPool2 = true; - assertThat(info.getMax(), equalTo(10)); - assertThat(info.getMin(), equalTo(10)); - assertThat(info.getQueueSize().singles(), equalTo(1L)); - assertEquals(info.getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); - } else { - for (Field field : 
Names.class.getFields()) { - if (info.getName().equalsIgnoreCase(field.getName())) { - // This is ok it is a default thread pool - continue outer; - } - } - fail("Unexpected pool name: " + info.getName()); - } - } - assertThat(foundPool1, is(true)); - assertThat(foundPool2, is(true)); } finally { terminateThreadPoolIfNeeded(threadPool); } @@ -388,11 +228,4 @@ public class UpdateThreadPoolSettingsTests extends ESThreadPoolTestCase { return randomFrom(threadPoolNames.toArray(new String[threadPoolNames.size()])); } - private ThreadPool.ThreadPoolType randomIncorrectThreadPoolType(String threadPoolName) { - Set set = new HashSet<>(); - set.addAll(Arrays.asList(ThreadPool.ThreadPoolType.values())); - set.remove(ThreadPool.THREAD_POOL_TYPES.get(threadPoolName)); - return randomFrom(set.toArray(new ThreadPool.ThreadPoolType[set.size()])); - } - } diff --git a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index f285a1db52e..68c4c76d7e8 100644 --- a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -71,7 +72,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Before public void setUp() throws Exception { super.setUp(); - threadPool = new ThreadPool(getClass().getName()); + threadPool = new TestThreadPool(getClass().getName()); serviceA = build( Settings.builder() .put("name", "TS_A") diff --git 
a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java index ca5faf68b51..3c382c39509 100644 --- a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java +++ b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java @@ -64,7 +64,6 @@ public class NettySizeHeaderFrameDecoderTests extends ESTestCase { @Before public void startThreadPool() { threadPool = new ThreadPool(settings); - threadPool.setClusterSettings(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); NetworkService networkService = new NetworkService(settings); BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()); nettyTransport = new NettyTransport(settings, threadPool, networkService, bigArrays, Version.CURRENT, new NamedWriteableRegistry(), diff --git a/core/src/test/java/org/elasticsearch/transport/NettyTransportServiceHandshakeTests.java b/core/src/test/java/org/elasticsearch/transport/NettyTransportServiceHandshakeTests.java index 6a7e0a0de36..a111911e994 100644 --- a/core/src/test/java/org/elasticsearch/transport/NettyTransportServiceHandshakeTests.java +++ b/core/src/test/java/org/elasticsearch/transport/NettyTransportServiceHandshakeTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.netty.NettyTransport; import org.junit.After; @@ -51,7 +52,7 @@ public class NettyTransportServiceHandshakeTests extends ESTestCase { @BeforeClass public static void startThreadPool() { - threadPool = new 
ThreadPool(NettyTransportServiceHandshakeTests.class.getSimpleName()); + threadPool = new TestThreadPool(NettyTransportServiceHandshakeTests.class.getSimpleName()); } private List transportServices = new ArrayList<>(); diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java index 8c00ae01b74..de156659082 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BaseTransportResponseHandler; import org.elasticsearch.transport.TransportChannel; @@ -52,7 +53,7 @@ import static org.hamcrest.Matchers.greaterThan; */ public class NettyScheduledPingTests extends ESTestCase { public void testScheduledPing() throws Exception { - ThreadPool threadPool = new ThreadPool(getClass().getName()); + ThreadPool threadPool = new TestThreadPool(getClass().getName()); Settings settings = Settings.builder() .put(NettyTransport.PING_SCHEDULE.getKey(), "5ms") diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java index 352e7bb5a59..0ed0cf3ec97 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.util.BigArrays; import 
org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; @@ -56,7 +57,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { .put("transport.profiles.client1.port", 0) .build(); - ThreadPool threadPool = new ThreadPool("tst"); + ThreadPool threadPool = new TestThreadPool("tst"); try (NettyTransport transport = startNettyTransport(settings, threadPool)) { assertEquals(1, transport.profileBoundAddresses().size()); assertEquals(1, transport.boundAddress().boundAddresses().length); @@ -72,7 +73,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { .put("transport.profiles.client1.port", 0) .build(); - ThreadPool threadPool = new ThreadPool("tst"); + ThreadPool threadPool = new TestThreadPool("tst"); try (NettyTransport transport = startNettyTransport(settings, threadPool)) { assertEquals(1, transport.profileBoundAddresses().size()); assertEquals(1, transport.boundAddress().boundAddresses().length); @@ -89,7 +90,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { .put("transport.profiles.client1.whatever", "foo") .build(); - ThreadPool threadPool = new ThreadPool("tst"); + ThreadPool threadPool = new TestThreadPool("tst"); try (NettyTransport transport = startNettyTransport(settings, threadPool)) { assertEquals(0, transport.profileBoundAddresses().size()); assertEquals(1, transport.boundAddress().boundAddresses().length); @@ -105,7 +106,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { .put("transport.profiles.default.port", 0) .build(); - ThreadPool threadPool = new ThreadPool("tst"); + ThreadPool threadPool = new TestThreadPool("tst"); try (NettyTransport transport = startNettyTransport(settings, 
threadPool)) { assertEquals(0, transport.profileBoundAddresses().size()); assertEquals(1, transport.boundAddress().boundAddresses().length); @@ -123,7 +124,7 @@ public class NettyTransportMultiPortTests extends ESTestCase { .put("transport.profiles..port", 23) // will not actually bind to this .build(); - ThreadPool threadPool = new ThreadPool("tst"); + ThreadPool threadPool = new TestThreadPool("tst"); try (NettyTransport transport = startNettyTransport(settings, threadPool)) { assertEquals(0, transport.profileBoundAddresses().size()); assertEquals(1, transport.boundAddress().boundAddresses().length); diff --git a/core/src/test/java/org/elasticsearch/watcher/ResourceWatcherServiceTests.java b/core/src/test/java/org/elasticsearch/watcher/ResourceWatcherServiceTests.java index 6c6c45e9cfd..82a3a55868a 100644 --- a/core/src/test/java/org/elasticsearch/watcher/ResourceWatcherServiceTests.java +++ b/core/src/test/java/org/elasticsearch/watcher/ResourceWatcherServiceTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.watcher; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; @@ -33,7 +34,7 @@ import static org.hamcrest.Matchers.notNullValue; */ public class ResourceWatcherServiceTests extends ESTestCase { public void testSettings() throws Exception { - ThreadPool threadPool = new ThreadPool("test"); + ThreadPool threadPool = new TestThreadPool("test"); // checking the defaults Settings settings = Settings.builder().build(); @@ -65,7 +66,7 @@ public class ResourceWatcherServiceTests extends ESTestCase { } public void testHandle() throws Exception { - ThreadPool threadPool = new ThreadPool("test"); + ThreadPool threadPool = new TestThreadPool("test"); Settings settings = Settings.builder().build(); ResourceWatcherService service = new 
ResourceWatcherService(settings, threadPool); ResourceWatcher watcher = new ResourceWatcher() { diff --git a/docs/reference/migration/migrate_5_0/settings.asciidoc b/docs/reference/migration/migrate_5_0/settings.asciidoc index 5edd4e449e8..5702d51fc12 100644 --- a/docs/reference/migration/migrate_5_0/settings.asciidoc +++ b/docs/reference/migration/migrate_5_0/settings.asciidoc @@ -79,6 +79,19 @@ on the thread pool type, `keep_alive`, `queue_size`, etc.). The `suggest` threadpool has been removed, now suggest requests use the `search` threadpool. +The prefix on all thread pool settings has been changed from +`threadpool` to `thread_pool`. + +The minimum size setting for a scaling thread pool has been changed +from `min` to `core`. + +The maximum size setting for a scaling thread pool has been changed +from `size` to `max`. + +The queue size setting for a fixed thread pool must be `queue_size` +(all other variants that were previously supported are no longer +supported). + ==== Analysis settings The `index.analysis.analyzer.default_index` analyzer is not supported anymore. diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index 65069e25940..d8f8545c213 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -17,7 +17,7 @@ There are several thread pools, but the important ones include: For index/delete operations. Thread pool type is `fixed` with a size of `# of available processors`, queue_size of `200`. The maximum size for this pool - is `# of available processors`. + is `1 + # of available processors`. `search`:: For count/search/suggest operations. Thread pool type is `fixed` @@ -33,7 +33,7 @@ There are several thread pools, but the important ones include: For bulk operations. Thread pool type is `fixed` with a size of `# of available processors`, queue_size of `50`. The maximum size for this pool - is `# of available processors`. 
+ is `1 + # of available processors`. `percolate`:: For percolate operations. Thread pool type is `fixed` @@ -42,26 +42,26 @@ There are several thread pools, but the important ones include: `snapshot`:: For snapshot/restore operations. Thread pool type is `scaling` with a - keep-alive of `5m` and a size of `min(5, (# of available processors)/2)`. + keep-alive of `5m` and a max of `min(5, (# of available processors)/2)`. `warmer`:: For segment warm-up operations. Thread pool type is `scaling` with a - keep-alive of `5m` and a size of `min(5, (# of available processors)/2)`. + keep-alive of `5m` and a max of `min(5, (# of available processors)/2)`. `refresh`:: For refresh operations. Thread pool type is `scaling` with a - keep-alive of `5m` and a size of `min(10, (# of available processors)/2)`. + keep-alive of `5m` and a max of `min(10, (# of available processors)/2)`. `listener`:: Mainly for java client executing of action when listener threaded is set to true. - Thread pool type is `scaling` with a default size of `min(10, (# of available processors)/2)`. + Thread pool type is `scaling` with a default max of `min(10, (# of available processors)/2)`. Changing a specific thread pool can be done by setting its type-specific parameters; for example, changing the `index` thread pool to have more threads: [source,js] -------------------------------------------------- -threadpool: +thread_pool: index: size: 30 -------------------------------------------------- @@ -91,7 +91,7 @@ full, it will abort the request. [source,js] -------------------------------------------------- -threadpool: +thread_pool: index: size: 30 queue_size: 1000 @@ -102,17 +102,17 @@ threadpool: The `scaling` thread pool holds a dynamic number of threads. This number is proportional to the workload and varies between the value of -the `min` and `size` parameters. +the `core` and `max` parameters. 
The `keep_alive` parameter determines how long a thread should be kept around in the thread pool without it doing any work. [source,js] -------------------------------------------------- -threadpool: +thread_pool: warmer: - min: 1 - size: 8 + core: 1 + max: 8 keep_alive: 2m -------------------------------------------------- diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/TemplateQueryParserTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/TemplateQueryParserTests.java index 9aa5e1892ef..ddb330b01a7 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/TemplateQueryParserTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests/TemplateQueryParserTests.java @@ -108,10 +108,11 @@ public class TemplateQueryParserTests extends ESTestCase { // TODO: make this use a mock engine instead of mustache and it will no longer be messy! scriptModule.addScriptEngine(new ScriptEngineRegistry.ScriptEngineRegistration(MustacheScriptEngineService.class, MustacheScriptEngineService.NAME, true)); settingsModule.registerSetting(InternalSettingsPlugin.VERSION_CREATED); + final ThreadPool threadPool = new ThreadPool(settings); injector = new ModulesBuilder().add( new EnvironmentModule(new Environment(settings)), settingsModule, - new ThreadPoolModule(new ThreadPool(settings)), + new ThreadPoolModule(threadPool), new SearchModule(settings, new NamedWriteableRegistry()) { @Override protected void configureSearch() { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionTestCase.java index b9489e9f5d9..66c636f4f1d 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionTestCase.java +++ 
b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionTestCase.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -35,7 +36,7 @@ public abstract class AbstractAsyncBulkIndexByScrollActionTestCase< @Before public void setupForTest() { - threadPool = new ThreadPool(getTestName()); + threadPool = new TestThreadPool(getTestName()); task = new BulkByScrollTask(1, "test", "test", "test", TaskId.EMPTY_TASK_ID, 0); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 9ce505bacd3..2af63deb3a9 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -72,6 +72,7 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -126,7 +127,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { @Before public void setupForTest() { client = new MyMockClient(new NoOpClient(getTestName())); - threadPool = new ThreadPool(getTestName()); + threadPool = new TestThreadPool(getTestName()); firstSearchRequest = new SearchRequest(); testRequest = new DummyAbstractBulkByScrollRequest(firstSearchRequest); listener = new PlainActionFuture<>(); @@ -311,7 +312,7 
@@ public class AsyncBulkByScrollActionTests extends ESTestCase { public void testThreadPoolRejectionsAbortRequest() throws Exception { TimeValue expectedDelay = parseTimeValue(randomPositiveTimeValue(), "test"); threadPool.shutdown(); - threadPool = new ThreadPool(getTestName()) { + threadPool = new TestThreadPool(getTestName()) { @Override public ScheduledFuture schedule(TimeValue delay, String name, Runnable command) { assertEquals(expectedDelay, delay); // While we're here we can check that the sleep made it through @@ -444,7 +445,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { AtomicReference capturedDelay = new AtomicReference<>(); AtomicReference capturedCommand = new AtomicReference<>(); threadPool.shutdown(); - threadPool = new ThreadPool(getTestName()) { + threadPool = new TestThreadPool(getTestName()) { @Override public ScheduledFuture schedule(TimeValue delay, String name, Runnable command) { capturedDelay.set(delay); @@ -612,7 +613,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { * is a delay. 
*/ threadPool.shutdown(); - threadPool = new ThreadPool(getTestName()) { + threadPool = new TestThreadPool(getTestName()) { @Override public ScheduledFuture schedule(TimeValue delay, String name, Runnable command) { /* diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java index d64c69ba362..fd1a17a439d 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.Before; @@ -168,7 +169,7 @@ public class BulkByScrollTaskTests extends ESTestCase { task.rethrottle(originalRequestsPerSecond); TimeValue maxDelay = timeValueSeconds(between(1, 5)); assertThat(maxDelay.nanos(), greaterThanOrEqualTo(0L)); - ThreadPool threadPool = new ThreadPool(getTestName()) { + ThreadPool threadPool = new TestThreadPool(getTestName()) { @Override public ScheduledFuture schedule(TimeValue delay, String name, Runnable command) { assertThat(delay.nanos(), both(greaterThanOrEqualTo(0L)).and(lessThanOrEqualTo(maxDelay.nanos()))); @@ -220,7 +221,7 @@ public class BulkByScrollTaskTests extends ESTestCase { public void testDelayNeverNegative() throws IOException { // Thread pool that returns a ScheduledFuture that claims to have a negative delay - ThreadPool threadPool = new ThreadPool("test") { + ThreadPool threadPool = new TestThreadPool("test") { public ScheduledFuture schedule(TimeValue delay, String name, Runnable command) { return new ScheduledFuture() { @Override diff --git 
a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index 699ba483e30..09945c9372b 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -63,11 +63,11 @@ public class RetryTests extends ESSingleNodeTestCase { protected Settings nodeSettings() { Settings.Builder settings = Settings.builder().put(super.nodeSettings()); // Use pools of size 1 so we can block them - settings.put("threadpool.bulk.size", 1); - settings.put("threadpool.search.size", 1); + settings.put("thread_pool.bulk.size", 1); + settings.put("thread_pool.search.size", 1); // Use queues of size 1 because size 0 is broken and because search requests need the queue to function - settings.put("threadpool.bulk.queue_size", 1); - settings.put("threadpool.search.queue_size", 1); + settings.put("thread_pool.bulk.queue_size", 1); + settings.put("thread_pool.search.queue_size", 1); return settings.build(); } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index c4863680613..956732113c1 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.junit.AfterClass; @@ -53,7 +54,7 @@ public class 
Ec2DiscoveryTests extends ESTestCase { @BeforeClass public static void createThreadPool() { - threadPool = new ThreadPool(Ec2DiscoveryTests.class.getName()); + threadPool = new TestThreadPool(Ec2DiscoveryTests.class.getName()); } @AfterClass diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java index 4525b1ece1d..78d96fd0163 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.AfterClass; @@ -72,7 +73,7 @@ public class GceDiscoveryTests extends ESTestCase { @BeforeClass public static void createThreadPool() { - threadPool = new ThreadPool(GceDiscoveryTests.class.getName()); + threadPool = new TestThreadPool(GceDiscoveryTests.class.getName()); } @AfterClass diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 0859a8e86ea..8f3fd5010e6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -28,7 +28,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; - import 
org.apache.lucene.uninverting.UninvertingReader; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java index 5f2237640e6..6ff45608700 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.support.AbstractClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import java.util.concurrent.TimeUnit; @@ -34,7 +35,7 @@ import java.util.concurrent.TimeUnit; public class NoOpClient extends AbstractClient { public NoOpClient(String testName) { - super(Settings.EMPTY, new ThreadPool(testName)); + super(Settings.EMPTY, new TestThreadPool(testName)); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java b/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java new file mode 100644 index 00000000000..0d525f7f59a --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.threadpool; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.Node; + +public class TestThreadPool extends ThreadPool { + + public TestThreadPool(String name) { + super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).build()); + } + +}