From 9411f18f27a023b28f28aaa6af904bc519cca8fa Mon Sep 17 00:00:00 2001
From: GUILLAUME GROSSETIE
Date: Wed, 1 Jun 2016 17:14:13 +0200
Subject: [PATCH 01/18] Display plugin versions

This is useful to determine if a plugin needs to be updated when using a
deployment automation solution (like Ansible).
---
 .../plugins/ListPluginsCommand.java           |  2 +-
 .../plugins/ListPluginsCommandTests.java      | 54 +++++++++----------
 2 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java
index bd2f853bac0..ee81261c080 100644
--- a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java
+++ b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java
@@ -60,8 +60,8 @@ class ListPluginsCommand extends SettingCommand {
         }
         Collections.sort(plugins);
         for (final Path plugin : plugins) {
-            terminal.println(plugin.getFileName().toString());
             PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin.toAbsolutePath()));
+            terminal.println(plugin.getFileName().toString() + "@" + info.getVersion());
             terminal.println(Terminal.Verbosity.VERBOSE, info.toString());
         }
     }
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java
index 1422280165c..ddac8f66209 100644
--- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java
@@ -59,7 +59,7 @@ public class ListPluginsCommandTests extends ESTestCase {
     static MockTerminal listPlugins(Path home) throws Exception {
        return listPlugins(home, new String[0]);
    }
-
+
    static MockTerminal listPlugins(Path home, String[] args) throws Exception {
        String[] argsAndHome = new String[args.length + 1];
        System.arraycopy(args, 0, argsAndHome, 0, args.length);
@@ -69,16 +69,16 @@ public class ListPluginsCommandTests extends ESTestCase {
        assertEquals(ExitCodes.OK, status);
        return terminal;
    }
-
+
    static String buildMultiline(String...
args){ return Arrays.asList(args).stream().collect(Collectors.joining("\n", "", "\n")); } - - static void buildFakePlugin(Environment env, String description, String name, String classname) throws IOException { + + static void buildFakePlugin(Environment env, String description, String name, String classname, String version) throws IOException { PluginTestUtil.writeProperties(env.pluginsFile().resolve(name), "description", description, "name", name, - "version", "1.0", + "version", version, "elasticsearch.version", Version.CURRENT.toString(), "java.version", System.getProperty("java.specification.version"), "classname", classname); @@ -97,51 +97,51 @@ public class ListPluginsCommandTests extends ESTestCase { } public void testOnePlugin() throws Exception { - buildFakePlugin(env, "fake desc", "fake", "org.fake"); + buildFakePlugin(env, "fake desc", "fake", "org.fake", "1.0.0"); MockTerminal terminal = listPlugins(home); - assertEquals(terminal.getOutput(), buildMultiline("fake")); + assertEquals(terminal.getOutput(), buildMultiline("fake@1.0.0")); } public void testTwoPlugins() throws Exception { - buildFakePlugin(env, "fake desc", "fake1", "org.fake"); - buildFakePlugin(env, "fake desc 2", "fake2", "org.fake"); + buildFakePlugin(env, "fake desc", "fake1", "org.fake", "1.2.3"); + buildFakePlugin(env, "fake desc 2", "fake2", "org.fake", "6.5.4"); MockTerminal terminal = listPlugins(home); - assertEquals(terminal.getOutput(), buildMultiline("fake1", "fake2")); + assertEquals(terminal.getOutput(), buildMultiline("fake1@1.2.3", "fake2@6.5.4")); } - + public void testPluginWithVerbose() throws Exception { - buildFakePlugin(env, "fake desc", "fake_plugin", "org.fake"); + buildFakePlugin(env, "fake desc", "fake_plugin", "org.fake", "1.0.0"); String[] params = { "-v" }; MockTerminal terminal = listPlugins(home, params); - assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), "fake_plugin", - "- Plugin information:", "Name: fake_plugin", "Description: fake desc", "Version: 1.0", " * Classname: org.fake")); + assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), "fake_plugin@1.0.0", + "- Plugin information:", "Name: fake_plugin", "Description: fake desc", "Version: 1.0.0", " * Classname: org.fake")); } - + public void testPluginWithVerboseMultiplePlugins() throws Exception { - buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake"); - buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); + buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake", "1.2.3"); + buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2", "6.5.4"); String[] params = { "-v" }; MockTerminal terminal = listPlugins(home, params); assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), - "fake_plugin1", "- Plugin information:", "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.0", - " * Classname: org.fake", "fake_plugin2", "- Plugin information:", "Name: fake_plugin2", - "Description: fake desc 2", "Version: 1.0", " * Classname: org.fake2")); + "fake_plugin1@1.2.3", "- Plugin information:", "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.2.3", + " * Classname: org.fake", "fake_plugin2@6.5.4", "- Plugin information:", "Name: fake_plugin2", + "Description: fake desc 2", "Version: 6.5.4", " * Classname: org.fake2")); } - + public void testPluginWithoutVerboseMultiplePlugins() throws Exception { - buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake"); 
-        buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2");
+        buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake", "1.0.0");
+        buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2", "1.0.0");
         MockTerminal terminal = listPlugins(home, new String[0]);
         String output = terminal.getOutput();
-        assertEquals(output, buildMultiline("fake_plugin1", "fake_plugin2"));
+        assertEquals(output, buildMultiline("fake_plugin1@1.0.0", "fake_plugin2@1.0.0"));
     }
-
+
     public void testPluginWithoutDescriptorFile() throws Exception{
         Files.createDirectories(env.pluginsFile().resolve("fake1"));
         NoSuchFileException e = expectThrows(NoSuchFileException.class, () -> listPlugins(home));
         assertEquals(e.getFile(), env.pluginsFile().resolve("fake1").resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString());
     }
-
+
     public void testPluginWithWrongDescriptorFile() throws Exception{
         PluginTestUtil.writeProperties(env.pluginsFile().resolve("fake1"),
             "description", "fake desc");
@@ -149,5 +149,5 @@ public class ListPluginsCommandTests extends ESTestCase {
         assertEquals(e.getMessage(), "Property [name] is missing in [" +
                 env.pluginsFile().resolve("fake1").resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString() + "]");
     }
-
+
 }

From 201217945fdcb667ad7b0f9b54f926485ccab3b7 Mon Sep 17 00:00:00 2001
From: Alex Benusovich
Date: Fri, 26 Aug 2016 16:59:22 -0700
Subject: [PATCH 02/18] Fix IndexNotFoundException if a multi-index search
 request had a concrete index followed by an add/remove of another concrete
 index.

The code now properly adds/removes the index instead of throwing an
exception.

Closes #3839
---
 .../metadata/IndexNameExpressionResolver.java | 18 ++++++++----------
 .../WildcardExpressionResolverTests.java      |  4 ++++
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
index df53395fe27..6ecf7483d80 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
@@ -607,23 +607,21 @@ public class IndexNameExpressionResolver extends AbstractComponent {
                 add = false;
                 expression = expression.substring(1);
             }
+            if (result == null) {
+                // add all the previous ones...
+                result = new HashSet<>(expressions.subList(0, i));
+            }
             if (!Regex.isSimpleMatchPattern(expression)) {
                 if (!unavailableIgnoredOrExists(options, metaData, expression)) {
                     throw infe(expression);
                 }
-                if (result != null) {
-                    if (add) {
-                        result.add(expression);
-                    } else {
-                        result.remove(expression);
-                    }
+                if (add) {
+                    result.add(expression);
+                } else {
+                    result.remove(expression);
                 }
                 continue;
             }
-            if (result == null) {
-                // add all the previous ones...
-                result = new HashSet<>(expressions.subList(0, i));
-            }
             final IndexMetaData.State excludeState = excludeState(options);
             final Map matches = matches(metaData, expression);
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java
index 744477d6722..01110e796e8 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java
@@ -49,6 +49,10 @@ public class WildcardExpressionResolverTests extends ESTestCase {
         assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), equalTo(newHashSet("testXXX", "testXYY", "kuku")));
         assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku")));
         assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testYYY"))), equalTo(newHashSet("testXXX", "testYYY")));
+        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))).size(), equalTo(0));
+        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testY*"))), equalTo(newHashSet("testXXX", "testYYY")));
+        assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testX*"))).size(), equalTo(0));
     }

     public void testConvertWildcardsTests() {

From 4a1a09cf430bf7c4b5f8d85ce624a9d6df26092e Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Fri, 9 Sep 2016 13:00:41 +0200
Subject: [PATCH 03/18] Fix highlighting of MultiTermQuery within a
 FunctionScoreQuery

Since the sub query of a function score query is checked in
CustomQueryScorer#extractUnknownQuery, we try to extract the terms from the
rewritten form of the sub query. MultiTermQuery rewrites the query into a
constant score query/weight, which returns an empty array when extractTerms
is called. The extraction of the inner terms of a constant score query/weight
changed in Lucene somewhere between ES version 2.3 and 2.4
(https://issues.apache.org/jira/browse/LUCENE-6425), which is why this
problem occurs on ES > 2.3.

This change moves the extraction of the sub query from
CustomQueryScorer#extractUnknownQuery to CustomQueryScorer#extract so that
the terms are extracted from the original form of the sub query. This fixes
highlighting of sub queries that extend MultiTermQuery, since there is a
special path for this kind of query in the QueryScorer (which extracts the
terms to highlight).
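For context, a minimal sketch of the affected case, modeled on the
integration test this patch adds: a prefix query (a MultiTermQuery) wrapped
in a function_score query, with highlighting requested on the same field.
The index and field names ("test", "text") and the `client` handle are
illustrative assumptions, not part of the patch:

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
    import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;

    // Assumes an index "test" containing a document whose "text" field is "brown".
    SearchResponse response = client.prepareSearch("test")
            .setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro")))
            .highlighter(new HighlightBuilder().field("text"))
            .get();
    // Before this change the highlighter saw only the rewritten (constant score)
    // form of the prefix query and produced no fragments; with it, the fragment
    // for the matching document is "<em>brown</em>".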
--- .../subphase/highlight/CustomQueryScorer.java | 11 +++++------ .../highlight/HighlighterSearchIT.java | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java index b62d28f8ab4..4816cba56df 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/CustomQueryScorer.java @@ -78,10 +78,7 @@ public final class CustomQueryScorer extends QueryScorer { @Override protected void extractUnknownQuery(Query query, Map terms) throws IOException { - if (query instanceof FunctionScoreQuery) { - query = ((FunctionScoreQuery) query).getSubQuery(); - extract(query, 1F, terms); - } else if (query instanceof FiltersFunctionScoreQuery) { + if (query instanceof FiltersFunctionScoreQuery) { query = ((FiltersFunctionScoreQuery) query).getSubQuery(); extract(query, 1F, terms); } else if (terms.isEmpty()) { @@ -97,9 +94,11 @@ public final class CustomQueryScorer extends QueryScorer { } else if (query instanceof HasChildQueryBuilder.LateParsingQuery) { // skip has_child or has_parent queries, see: https://github.com/elastic/elasticsearch/issues/14999 return; + } else if (query instanceof FunctionScoreQuery) { + super.extract(((FunctionScoreQuery) query).getSubQuery(), boost, terms); + } else { + super.extract(query, boost, terms); } - - super.extract(query, boost, terms); } } } diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 843ab09b2fe..c0fe9bab306 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -38,6 +38,7 @@ import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.search.MatchQuery; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; @@ -2851,4 +2852,21 @@ public class HighlighterSearchIT extends ESIntegTestCase { assertThat(field.getFragments()[0].string(), equalTo("brown")); assertThat(field.getFragments()[1].string(), equalTo("cow")); } + + public void testFunctionScoreQueryHighlight() throws Exception { + client().prepareIndex("test", "type", "1") + .setSource(jsonBuilder().startObject().field("text", "brown").endObject()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + + SearchResponse searchResponse = client().prepareSearch() + .setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) + .highlighter(new HighlightBuilder() + .field(new Field("text"))) + .get(); + assertHitCount(searchResponse, 1); + HighlightField field = searchResponse.getHits().getAt(0).highlightFields().get("text"); + assertThat(field.getFragments().length, equalTo(1)); + assertThat(field.getFragments()[0].string(), equalTo("brown")); + } } From 3b90906f4f5a14d662b85b72d48d39f1d06cc709 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 12 Sep 2016 
10:53:32 -0400
Subject: [PATCH 04/18] Adapt release flag to new CLI arguments

The JDK project is in the process of modifying the command-line flags for
various JDK tools (http://openjdk.java.net/jeps/293). In particular, the
release flag on javac has changed from -release to --release. This commit
adapts the build process to this change.

Relates #20420
---
 .../main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 4d7bee866b8..7ccdbcee221 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -405,9 +405,9 @@ class BuildPlugin implements Plugin {
             //options.incremental = true

             if (project.javaVersion == JavaVersion.VERSION_1_9) {
-                // hack until gradle supports java 9's new "-release" arg
+                // hack until gradle supports java 9's new "--release" arg
                 assert minimumJava == JavaVersion.VERSION_1_8
-                options.compilerArgs << '-release' << '8'
+                options.compilerArgs << '--release' << '8'
                 project.sourceCompatibility = null
                 project.targetCompatibility = null
             }

From 2a1ed8026265878741f67d501f7b1a806c99cea6 Mon Sep 17 00:00:00 2001
From: javanna
Date: Mon, 12 Sep 2016 16:32:32 +0200
Subject: [PATCH 05/18] With #20093 we fixed an NPE thrown when using _source
 include/exclude and source is disabled in the mappings.

Fixing it meant ignoring the _source parameter in the request, as no fields
can be extracted from it. We should instead throw a clear exception pointing
out that we cannot extract fields from _source.

Note that this happens only when explicitly trying to extract fields from
source. When source is disabled and no _source parameter is specified, no
errors will be thrown and no source will be returned.
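To make the new behavior concrete, a minimal sketch follows; the index name
("test"), the field name ("field1"), and the `client` handle are illustrative
assumptions, while the exception message is taken from the change itself:

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.index.query.QueryBuilders;

    // Assumes an index "test" whose mapping sets "_source": {"enabled": false}.
    // No explicit _source parameter: still fine, no error, no source returned.
    SearchResponse ok = client.prepareSearch("test")
            .setQuery(QueryBuilders.matchAllQuery())
            .get();

    // Explicit _source include: now rejected instead of being silently ignored.
    // Throws IllegalArgumentException: "unable to fetch fields from _source
    // field: _source is disabled in the mappings for index [test]"
    client.prepareSearch("test")
            .setQuery(QueryBuilders.matchAllQuery())
            .setFetchSource("field1", null)
            .get();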
Closes #20408 Relates to #20093 --- .../fetch/subphase/FetchSourceSubPhase.java | 8 +- .../subphase/FetchSourceSubPhaseTests.java | 81 +++++++++++-------- .../highlight/HighlighterSearchIT.java | 5 +- .../search/fields/SearchFieldsIT.java | 2 +- 4 files changed, 56 insertions(+), 40 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java index c67b96c7af5..fe5a9f286c1 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java @@ -36,9 +36,6 @@ public final class FetchSourceSubPhase implements FetchSubPhase { return; } SourceLookup source = context.lookup().source(); - if (source.internalSourceRef() == null) { - return; // source disabled in the mapping - } FetchSourceContext fetchSourceContext = context.fetchSourceContext(); assert fetchSourceContext.fetchSource(); if (fetchSourceContext.includes().length == 0 && fetchSourceContext.excludes().length == 0) { @@ -46,6 +43,11 @@ public final class FetchSourceSubPhase implements FetchSubPhase { return; } + if (source.internalSourceRef() == null) { + throw new IllegalArgumentException("unable to fetch fields from _source field: _source is disabled in the mappings " + + "for index [" + context.indexShard().shardId().getIndexName() + "]"); + } + Object value = source.filter(fetchSourceContext.includes(), fetchSourceContext.excludes()); try { final int initialCapacity = Math.min(1024, source.internalSourceRef().length()); diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java index 43461929bd9..d20fb4e0c06 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java @@ -23,6 +23,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.SearchContext; @@ -33,37 +35,11 @@ import org.elasticsearch.test.TestSearchContext; import java.io.IOException; import java.util.Collections; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + public class FetchSourceSubPhaseTests extends ESTestCase { - static class FetchSourceSubPhaseTestSearchContext extends TestSearchContext { - - FetchSourceContext context; - BytesReference source; - - FetchSourceSubPhaseTestSearchContext(FetchSourceContext context, BytesReference source) { - super(null); - this.context = context; - this.source = source; - } - - @Override - public boolean sourceRequested() { - return context != null && context.fetchSource(); - } - - @Override - public FetchSourceContext fetchSourceContext() { - return context; - } - - @Override - public SearchLookup lookup() { - SearchLookup lookup = super.lookup(); - lookup.source().setSource(source); - return lookup; - } - } - public void testFetchSource() throws IOException { XContentBuilder 
source = XContentFactory.jsonBuilder().startObject() .field("field", "value") @@ -109,11 +85,14 @@ public class FetchSourceSubPhaseTests extends ESTestCase { hitContext = hitExecute(null, false, null, null); assertNull(hitContext.hit().sourceAsMap()); - hitContext = hitExecute(null, true, "field1", null); - assertNull(hitContext.hit().sourceAsMap()); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> hitExecute(null, true, "field1", null)); + assertEquals("unable to fetch fields from _source field: _source is disabled in the mappings " + + "for index [index]", exception.getMessage()); - hitContext = hitExecuteMultiple(null, true, new String[]{"*"}, new String[]{"field2"}); - assertNull(hitContext.hit().sourceAsMap()); + exception = expectThrows(IllegalArgumentException.class, + () -> hitExecuteMultiple(null, true, new String[]{"*"}, new String[]{"field2"})); + assertEquals("unable to fetch fields from _source field: _source is disabled in the mappings " + + "for index [index]", exception.getMessage()); } private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude) { @@ -131,4 +110,40 @@ public class FetchSourceSubPhaseTests extends ESTestCase { phase.hitExecute(searchContext, hitContext); return hitContext; } + + private static class FetchSourceSubPhaseTestSearchContext extends TestSearchContext { + final FetchSourceContext context; + final BytesReference source; + final IndexShard indexShard; + + FetchSourceSubPhaseTestSearchContext(FetchSourceContext context, BytesReference source) { + super(null); + this.context = context; + this.source = source; + this.indexShard = mock(IndexShard.class); + when(indexShard.shardId()).thenReturn(new ShardId("index", "index", 1)); + } + + @Override + public boolean sourceRequested() { + return context != null && context.fetchSource(); + } + + @Override + public FetchSourceContext fetchSourceContext() { + return context; + } + + @Override + public SearchLookup lookup() { + SearchLookup lookup = super.lookup(); + lookup.source().setSource(source); + return lookup; + } + + @Override + public IndexShard indexShard() { + return indexShard; + } + } } diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 843ab09b2fe..7c72b533ec8 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.fetch.subphase.highlight; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -50,8 +49,8 @@ import org.hamcrest.Matcher; import org.hamcrest.Matchers; import java.io.IOException; -import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -96,7 +95,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class); + return Collections.singletonList(InternalSettingsPlugin.class); } public void testHighlightingWithWildcardName() throws IOException { diff --git 
a/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index b091149fa01..e66eeb48766 100644 --- a/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -80,7 +80,7 @@ public class SearchFieldsIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(CustomScriptPlugin.class); + return Collections.singletonList(CustomScriptPlugin.class); } public static class CustomScriptPlugin extends MockScriptPlugin { From f39f9b9760c20af0205d80b370888ec4376efbfd Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Mon, 12 Sep 2016 12:07:51 -0400 Subject: [PATCH 06/18] Update discovery nodes after cluster state is published (#20409) Before, when there was a new cluster state to publish, zen discovery would first update the set of nodes to ping based on the new cluster state, then publish the new cluster state. This is problematic because if the cluster state failed to publish, then the set of nodes to ping should not have been updated. This commit fixes the issue by updating the set of nodes to ping for fault detection only *after* the new cluster state has been published. --- .../discovery/zen/ZenDiscovery.java | 13 ++- .../discovery/zen/fd/NodesFaultDetection.java | 10 ++ .../discovery/zen/ZenDiscoveryUnitTests.java | 97 ++++++++++++++++++- .../PublishClusterStateActionTests.java | 42 ++++---- .../test/ClusterServiceUtils.java | 9 +- 5 files changed, 147 insertions(+), 24 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index c4fc4f15f40..f419da06e68 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -318,7 +318,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover if (!clusterChangedEvent.state().getNodes().isLocalNodeElectedMaster()) { throw new IllegalStateException("Shouldn't publish state when not master"); } - nodesFD.updateNodesAndPing(clusterChangedEvent.state()); + try { publishClusterState.publish(clusterChangedEvent, electMaster.minimumMasterNodes(), ackListener); } catch (FailedToCommitClusterStateException t) { @@ -338,6 +338,17 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover }); throw t; } + + // update the set of nodes to ping after the new cluster state has been published + nodesFD.updateNodesAndPing(clusterChangedEvent.state()); + } + + /** + * Gets the current set of nodes involved in the node fault detection. 
+ * NB: for testing purposes + */ + public Set getFaultDetectionNodes() { + return nodesFD.getNodes(); } @Override diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java index 40eb36cec1f..0ab5bde25cd 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java @@ -41,6 +41,8 @@ import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.Collections; +import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; @@ -91,6 +93,14 @@ public class NodesFaultDetection extends FaultDetection { listeners.remove(listener); } + /** + * Gets the current set of nodes involved in node fault detection. + * NB: For testing purposes. + */ + public Set getNodes() { + return Collections.unmodifiableSet(nodesFD.keySet()); + } + /** * make sure that nodes in clusterState are pinged. Any pinging to nodes which are not * part of the cluster will be stopped diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index ba4c14c2058..235df2d8a35 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -20,25 +20,44 @@ package org.elasticsearch.discovery.zen; import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNode.Role; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; +import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPing; +import org.elasticsearch.discovery.zen.ping.ZenPingService; +import org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.AssertingAckListener; +import org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.MockNode; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.discovery.zen.ZenDiscovery.shouldIgnoreOrRejectNewClusterState; +import static org.elasticsearch.discovery.zen.elect.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; +import static org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.createMockNode; +import static 
org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -107,7 +126,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { ArrayList masterNodes = new ArrayList<>(); ArrayList allNodes = new ArrayList<>(); for (int i = randomIntBetween(10, 20); i >= 0; i--) { - Set roles = new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))); + Set roles = new HashSet<>(randomSubsetOf(Arrays.asList(Role.values()))); DiscoveryNode node = new DiscoveryNode("node_" + i, "id_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(), roles, Version.CURRENT); responses.add(new ZenPing.PingResponse(node, randomBoolean() ? null : node, new ClusterName("test"), randomBoolean())); @@ -127,4 +146,80 @@ public class ZenDiscoveryUnitTests extends ESTestCase { assertThat(filteredNodes, equalTo(allNodes)); } } + + public void testNodesUpdatedAfterClusterStatePublished() throws Exception { + ThreadPool threadPool = new TestThreadPool(getClass().getName()); + // randomly make minimum_master_nodes a value higher than we have nodes for, so it will force failure + int minMasterNodes = randomBoolean() ? 3 : 1; + Settings settings = Settings.builder() + .put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes)).build(); + + Map nodes = new HashMap<>(); + ZenDiscovery zenDiscovery = null; + ClusterService clusterService = null; + try { + Set expectedFDNodes = null; + // create master node and its mocked up services + MockNode master = createMockNode("master", settings, null, threadPool, logger, nodes).setAsMaster(); + ClusterState state = master.clusterState; // initial cluster state + + // build the zen discovery and cluster service + clusterService = createClusterService(threadPool, master.discoveryNode); + setState(clusterService, state); + zenDiscovery = buildZenDiscovery(settings, master, clusterService, threadPool); + + // a new cluster state with a new discovery node (we will test if the cluster state + // was updated by the presence of this node in NodesFaultDetection) + MockNode newNode = createMockNode("new_node", settings, null, threadPool, logger, nodes); + ClusterState newState = ClusterState.builder(state).incrementVersion().nodes( + DiscoveryNodes.builder(state.nodes()).add(newNode.discoveryNode).masterNodeId(master.discoveryNode.getId()) + ).build(); + + try { + // publishing a new cluster state + ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent("testing", newState, state); + AssertingAckListener listener = new AssertingAckListener(newState.nodes().getSize() - 1); + expectedFDNodes = zenDiscovery.getFaultDetectionNodes(); + zenDiscovery.publish(clusterChangedEvent, listener); + listener.await(1, TimeUnit.HOURS); + // publish was a success, update expected FD nodes based on new cluster state + expectedFDNodes = fdNodesForState(newState, master.discoveryNode); + } catch (Discovery.FailedToCommitClusterStateException e) { + // not successful, so expectedFDNodes above should remain what it was originally assigned + assertEquals(3, minMasterNodes); // ensure min master nodes is the higher value, otherwise we shouldn't fail + } + + assertEquals(expectedFDNodes, zenDiscovery.getFaultDetectionNodes()); + } finally { + // clean close of transport service and publish action for each node + zenDiscovery.close(); + clusterService.close(); + for (MockNode curNode : 
nodes.values()) { + curNode.action.close(); + curNode.service.close(); + } + terminate(threadPool); + } + } + + private ZenDiscovery buildZenDiscovery(Settings settings, MockNode master, ClusterService clusterService, ThreadPool threadPool) { + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ZenPingService zenPingService = new ZenPingService(settings, Collections.emptySet()); + ElectMasterService electMasterService = new ElectMasterService(settings); + ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, master.service, clusterService, + clusterSettings, zenPingService, electMasterService); + zenDiscovery.start(); + return zenDiscovery; + } + + private Set fdNodesForState(ClusterState clusterState, DiscoveryNode localNode) { + final Set discoveryNodes = new HashSet<>(); + clusterState.getNodes().getNodes().valuesIt().forEachRemaining(discoveryNode -> { + // the local node isn't part of the nodes that are pinged (don't ping ourselves) + if (discoveryNode.getId().equals(localNode.getId()) == false) { + discoveryNodes.add(discoveryNode); + } + }); + return discoveryNodes; + } } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index 31c828ec30f..1b0d6f63fd5 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -145,21 +145,22 @@ public class PublishClusterStateActionTests extends ESTestCase { } public MockNode createMockNode(final String name) throws Exception { - return createMockNode(name, Settings.EMPTY); - } - - public MockNode createMockNode(String name, Settings settings) throws Exception { - return createMockNode(name, settings, null); + return createMockNode(name, Settings.EMPTY, null); } public MockNode createMockNode(String name, final Settings basSettings, @Nullable ClusterStateListener listener) throws Exception { + return createMockNode(name, basSettings, listener, threadPool, logger, nodes); + } + + public static MockNode createMockNode(String name, final Settings basSettings, @Nullable ClusterStateListener listener, + ThreadPool threadPool, Logger logger, Map nodes) throws Exception { final Settings settings = Settings.builder() .put("name", name) .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") .put(basSettings) .build(); - MockTransportService service = buildTransportService(settings); + MockTransportService service = buildTransportService(settings, threadPool); DiscoveryNode discoveryNode = DiscoveryNode.createLocal(settings, service.boundAddress().publishAddress(), NodeEnvironment.generateNodeId(settings)); MockNode node = new MockNode(discoveryNode, service, listener, logger); @@ -228,14 +229,14 @@ public class PublishClusterStateActionTests extends ESTestCase { terminate(threadPool); } - protected MockTransportService buildTransportService(Settings settings) { - MockTransportService transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool); + private static MockTransportService buildTransportService(Settings settings, ThreadPool threadPool) { + MockTransportService transportService = MockTransportService.local(settings, Version.CURRENT, threadPool); transportService.start(); 
transportService.acceptIncomingRequests(); return transportService; } - protected MockPublishAction buildPublishClusterStateAction( + private static MockPublishAction buildPublishClusterStateAction( Settings settings, MockTransportService transportService, Supplier clusterStateSupplier, @@ -253,8 +254,8 @@ public class PublishClusterStateActionTests extends ESTestCase { } public void testSimpleClusterStatePublishing() throws Exception { - MockNode nodeA = createMockNode("nodeA", Settings.EMPTY).setAsMaster(); - MockNode nodeB = createMockNode("nodeB", Settings.EMPTY); + MockNode nodeA = createMockNode("nodeA").setAsMaster(); + MockNode nodeB = createMockNode("nodeB"); // Initial cluster state ClusterState clusterState = nodeA.clusterState; @@ -282,7 +283,7 @@ public class PublishClusterStateActionTests extends ESTestCase { // Adding new node - this node should get full cluster state while nodeB should still be getting diffs - MockNode nodeC = createMockNode("nodeC", Settings.EMPTY); + MockNode nodeC = createMockNode("nodeC"); // cluster state update 3 - register node C previousClusterState = clusterState; @@ -336,7 +337,7 @@ public class PublishClusterStateActionTests extends ESTestCase { fail("Shouldn't send cluster state to myself"); }).setAsMaster(); - MockNode nodeB = createMockNode("nodeB", Settings.EMPTY); + MockNode nodeB = createMockNode("nodeB"); // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build(); @@ -444,7 +445,7 @@ public class PublishClusterStateActionTests extends ESTestCase { } }).setAsMaster(); - MockNode nodeB = createMockNode("nodeB", Settings.EMPTY); + MockNode nodeB = createMockNode("nodeB"); // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build(); @@ -495,7 +496,7 @@ public class PublishClusterStateActionTests extends ESTestCase { final int dataNodes = randomIntBetween(0, 5); final Settings dataSettings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build(); for (int i = 0; i < dataNodes; i++) { - discoveryNodesBuilder.add(createMockNode("data_" + i, dataSettings).discoveryNode); + discoveryNodesBuilder.add(createMockNode("data_" + i, dataSettings, null).discoveryNode); } discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId()); DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build(); @@ -521,7 +522,7 @@ public class PublishClusterStateActionTests extends ESTestCase { settings.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), expectingToCommit == false && timeOutNodes > 0 ? 
"100ms" : "1h") .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "5ms"); // test is about committing - MockNode master = createMockNode("master", settings.build()); + MockNode master = createMockNode("master", settings.build(), null); // randomize things a bit int[] nodeTypes = new int[goodNodes + errorNodes + timeOutNodes]; @@ -551,7 +552,8 @@ public class PublishClusterStateActionTests extends ESTestCase { } final int dataNodes = randomIntBetween(0, 3); // data nodes don't matter for (int i = 0; i < dataNodes; i++) { - final MockNode mockNode = createMockNode("data_" + i, Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build()); + final MockNode mockNode = createMockNode("data_" + i, + Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build(), null); discoveryNodesBuilder.add(mockNode.discoveryNode); if (randomBoolean()) { // we really don't care - just chaos monkey @@ -726,8 +728,8 @@ public class PublishClusterStateActionTests extends ESTestCase { Settings settings = Settings.builder() .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "1ms").build(); // short but so we will sometime commit sometime timeout - MockNode master = createMockNode("master", settings); - MockNode node = createMockNode("node", settings); + MockNode master = createMockNode("master", settings, null); + MockNode node = createMockNode("node", settings, null); ClusterState state = ClusterState.builder(master.clusterState) .nodes(DiscoveryNodes.builder(master.clusterState.nodes()).add(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build(); @@ -843,7 +845,7 @@ public class PublishClusterStateActionTests extends ESTestCase { assertFalse(actual.wasReadFromDiff()); } - static class MockPublishAction extends PublishClusterStateAction { + public static class MockPublishAction extends PublishClusterStateAction { AtomicBoolean timeoutOnSend = new AtomicBoolean(); AtomicBoolean errorOnSend = new AtomicBoolean(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java index a6d35930e6b..38682239b78 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java @@ -42,11 +42,16 @@ import static junit.framework.TestCase.fail; public class ClusterServiceUtils { public static ClusterService createClusterService(ThreadPool threadPool) { + DiscoveryNode discoveryNode = new DiscoveryNode("node", LocalTransportAddress.buildUnique(), Collections.emptyMap(), + new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())),Version.CURRENT); + return createClusterService(threadPool, discoveryNode); + } + + public static ClusterService createClusterService(ThreadPool threadPool, DiscoveryNode localNode) { ClusterService clusterService = new ClusterService(Settings.builder().put("cluster.name", "ClusterServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool); - clusterService.setLocalNode(new DiscoveryNode("node", LocalTransportAddress.buildUnique(), Collections.emptyMap(), - new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())),Version.CURRENT)); + clusterService.setLocalNode(localNode); clusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { @Override public void connectToAddedNodes(ClusterChangedEvent event) { From b08352047d5c3dfccec85b37e3de060aea08ed0d Mon 
Sep 17 00:00:00 2001 From: Boaz Leskes Date: Mon, 12 Sep 2016 18:20:25 +0200 Subject: [PATCH 07/18] Introduce IndexShardTestCase (#20411) Introduce a base class for unit tests that are based on real `IndexShard`s. The base class takes care of all the little details needed to create and recover shards. This commit also moves `IndexShardTests` and `ESIndexLevelReplicationTestCase` to use the new base class. All tests in `IndexShardTests` that required a full node environment were moved to a new `IndexShardIT` suite. --- .../index/IndexServiceTests.java | 10 +- .../ESIndexLevelReplicationTestCase.java | 227 +-- .../index/shard/IndexShardIT.java | 476 ++++++ .../index/shard/IndexShardTests.java | 1336 ++++++----------- .../index/shard/ShardUtilsTests.java | 4 + .../IndexingMemoryControllerTests.java | 5 +- .../cluster/routing/ShardRoutingHelper.java | 0 .../index/shard/IndexShardTestCase.java | 477 ++++++ 8 files changed, 1419 insertions(+), 1116 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java rename {core/src/test => test/framework/src/main}/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java (100%) create mode 100644 test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java diff --git a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java index 22324e1ff2b..afde263d73d 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -43,6 +43,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -52,13 +54,13 @@ import static org.hamcrest.Matchers.nullValue; public class IndexServiceTests extends ESSingleNodeTestCase { public void testDetermineShadowEngineShouldBeUsed() { Settings regularSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(SETTING_NUMBER_OF_SHARDS, 2) + .put(SETTING_NUMBER_OF_REPLICAS, 1) .build(); Settings shadowSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(SETTING_NUMBER_OF_SHARDS, 2) + .put(SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) .build(); diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index ec794091a42..2d6ef7f2069 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -18,15 +18,7 @@ */ package org.elasticsearch.index.replication; -import org.apache.lucene.document.Document; -import org.apache.lucene.index.IndexNotFoundException; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; import 
org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.Bits; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; @@ -41,52 +33,21 @@ import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource; -import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.MapperTestUtils; -import org.elasticsearch.index.cache.IndexCache; -import org.elasticsearch.index.cache.query.DisabledQueryCache; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.UidFieldMapper; -import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardState; +import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.Store; -import org.elasticsearch.indices.recovery.RecoveryFailedException; -import org.elasticsearch.indices.recovery.RecoverySourceHandler; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryTarget; -import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; -import org.elasticsearch.indices.recovery.StartRecoveryRequest; -import org.elasticsearch.test.DummyShardLock; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -94,10 +55,8 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; import java.util.concurrent.FutureTask; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.Consumer; @@ 
-107,98 +66,24 @@ import java.util.stream.StreamSupport; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; -public abstract class ESIndexLevelReplicationTestCase extends ESTestCase { +public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase { - protected ThreadPool threadPool; protected final Index index = new Index("test", "uuid"); private final ShardId shardId = new ShardId(index, 0); private final Map indexMapping = Collections.singletonMap("type", "{ \"type\": {} }"); - protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() { - @Override - public void onRecoveryDone(RecoveryState state) { - - } - - @Override - public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { - fail(ExceptionsHelper.detailedMessage(e)); - } - }; - - - @TestLogging("index.shard:TRACE,index.replication:TRACE,indices.recovery:TRACE") - public void testIndexingDuringFileRecovery() throws Exception { - try (ReplicationGroup shards = createGroup(randomInt(1))) { - shards.startAll(); - int docs = shards.indexDocs(randomInt(50)); - shards.flush(); - IndexShard replica = shards.addReplica(); - final CountDownLatch recoveryBlocked = new CountDownLatch(1); - final CountDownLatch releaseRecovery = new CountDownLatch(1); - final Future recoveryFuture = shards.asyncRecoverReplica(replica, - new BiFunction() { - @Override - public RecoveryTarget apply(IndexShard indexShard, DiscoveryNode node) { - return new RecoveryTarget(indexShard, node, recoveryListener, version -> {}) { - @Override - public void renameAllTempFiles() throws IOException { - super.renameAllTempFiles(); - recoveryBlocked.countDown(); - try { - releaseRecovery.await(); - } catch (InterruptedException e) { - throw new IOException("terminated by interrupt", e); - } - } - }; - } - }); - - recoveryBlocked.await(); - docs += shards.indexDocs(randomInt(20)); - releaseRecovery.countDown(); - recoveryFuture.get(); - - shards.assertAllEqual(docs); - } - } - - @Override - public void setUp() throws Exception { - super.setUp(); - threadPool = new TestThreadPool(getClass().getName()); - } - - @Override - public void tearDown() throws Exception { - super.tearDown(); - ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); - } - - private Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException { - final ShardId shardId = shardPath.getShardId(); - final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { - @Override - public Directory newDirectory() throws IOException { - return newFSDirectory(shardPath.resolveIndex()); - } - - @Override - public long throttleTimeInNanos() { - return 0; - } - }; - return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); - } protected ReplicationGroup createGroup(int replicas) throws IOException { - final Path homePath = createTempDir(); - Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .build(); - IndexMetaData metaData = IndexMetaData.builder(index.getName()).settings(build).primaryTerm(0, 1).build(); - return new ReplicationGroup(metaData, homePath); + IndexMetaData.Builder metaData = 
IndexMetaData.builder(index.getName()) + .settings(settings) + .primaryTerm(0, 1); + for (Map.Entry typeMapping: indexMapping.entrySet()) { + metaData.putMapping(typeMapping.getKey(), typeMapping.getValue()); + } + return new ReplicationGroup(metaData.build()); } protected DiscoveryNode getDiscoveryNode(String id) { @@ -206,50 +91,22 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase { Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT); } - private IndexShard newShard(boolean primary, DiscoveryNode node, IndexMetaData indexMetaData, Path homePath) throws IOException { - // add node name to settings for propper logging - final Settings nodeSettings = Settings.builder().put("node.name", node.getName()).build(); - final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings); - ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, node.getId(), primary, ShardRoutingState.INITIALIZING, - primary ? StoreRecoverySource.EMPTY_STORE_INSTANCE : PeerRecoverySource.INSTANCE); - final Path path = Files.createDirectories(homePath.resolve(node.getId())); - final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(path); - ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); - Store store = createStore(indexSettings, shardPath); - IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null); - MapperService mapperService = MapperTestUtils.newMapperService(homePath, indexSettings.getSettings()); - for (Map.Entry type : indexMapping.entrySet()) { - mapperService.merge(type.getKey(), new CompressedXContent(type.getValue()), MapperService.MergeReason.MAPPING_RECOVERY, true); - } - SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap()); - final IndexEventListener indexEventListener = new IndexEventListener() { - }; - final Engine.Warmer warmer = searcher -> { - }; - return new IndexShard(shardRouting, indexSettings, shardPath, store, indexCache, mapperService, similarityService, null, null, - indexEventListener, null, threadPool, BigArrays.NON_RECYCLING_INSTANCE, warmer, Collections.emptyList(), - Collections.emptyList()); - } - protected class ReplicationGroup implements AutoCloseable, Iterable { private final IndexShard primary; private final List replicas; private final IndexMetaData indexMetaData; - private final Path homePath; private final AtomicInteger replicaId = new AtomicInteger(); private final AtomicInteger docId = new AtomicInteger(); boolean closed = false; - ReplicationGroup(final IndexMetaData indexMetaData, Path homePath) throws IOException { - primary = newShard(true, getDiscoveryNode("s0"), indexMetaData, homePath); + ReplicationGroup(final IndexMetaData indexMetaData) throws IOException { + primary = newShard(shardId, true, "s0", indexMetaData, null); replicas = new ArrayList<>(); this.indexMetaData = indexMetaData; - this.homePath = homePath; for (int i = 0; i < indexMetaData.getNumberOfReplicas(); i++) { addReplica(); } - } public int indexDocs(final int numOfDoc) throws Exception { @@ -289,7 +146,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase { } public synchronized IndexShard addReplica() throws IOException { - final IndexShard replica = newShard(false, getDiscoveryNode("s" + replicaId.incrementAndGet()), indexMetaData, homePath); + final IndexShard replica = newShard(shardId, false,"s" + replicaId.incrementAndGet(), indexMetaData, 
null); replicas.add(replica); return replica; } @@ -304,39 +161,8 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase { } public void recoverReplica(IndexShard replica, BiFunction targetSupplier, - boolean markAsRecovering) - throws IOException { - final DiscoveryNode pNode = getPrimaryNode(); - final DiscoveryNode rNode = getDiscoveryNode(replica.routingEntry().currentNodeId()); - if (markAsRecovering) { - replica.markAsRecovering("remote", - new RecoveryState(replica.routingEntry(), pNode, rNode)); - } else { - assertEquals(replica.state(), IndexShardState.RECOVERING); - } - replica.prepareForIndexRecovery(); - RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode); - StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), pNode, rNode, - getMetadataSnapshotOrEmpty(replica), false, 0); - RecoverySourceHandler recovery = new RecoverySourceHandler(primary, recoveryTarget, request, () -> 0L, e -> () -> {}, - (int) ByteSizeUnit.MB.toKB(1), logger); - recovery.recoverToTarget(); - recoveryTarget.markAsDone(); - replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry())); - } - - private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException { - Store.MetadataSnapshot result; - try { - result = replica.snapshotStoreMetadata(); - } catch (IndexNotFoundException e) { - // OK! - result = Store.MetadataSnapshot.EMPTY; - } catch (IOException e) { - logger.warn("failed read store, treating as empty", e); - result = Store.MetadataSnapshot.EMPTY; - } - return result; + boolean markAsRecovering) throws IOException { + ESIndexLevelReplicationTestCase.this.recoverReplica(replica, primary, targetSupplier, markAsRecovering); } public synchronized DiscoveryNode getPrimaryNode() { @@ -367,24 +193,6 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase { } } - private Set getShardDocUIDs(final IndexShard shard) throws IOException { - shard.refresh("get_uids"); - try (Engine.Searcher searcher = shard.acquireSearcher("test")) { - Set ids = new HashSet<>(); - for (LeafReaderContext leafContext : searcher.reader().leaves()) { - LeafReader reader = leafContext.reader(); - Bits liveDocs = reader.getLiveDocs(); - for (int i = 0; i < reader.maxDoc(); i++) { - if (liveDocs == null || liveDocs.get(i)) { - Document uuid = reader.document(i, Collections.singleton(UidFieldMapper.NAME)); - ids.add(Uid.createUid(uuid.get(UidFieldMapper.NAME))); - } - } - } - return ids; - } - } - public synchronized void refresh(String source) { for (IndexShard shard : this) { shard.refresh(source); @@ -406,10 +214,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase { public synchronized void close() throws Exception { if (closed == false) { closed = true; - for (IndexShard shard : this) { - shard.close("eol", false); - IOUtils.close(shard.store()); - } + closeShards(this); } else { throw new AlreadyClosedException("too bad"); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java new file mode 100644 index 00000000000..fc943bcebe9 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -0,0 +1,476 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.shard; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.index.Term; +import org.apache.lucene.store.LockObtainFailedException; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.LocalTransportAddress; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.ShardLock; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.flush.FlushStats; +import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.DummyShardLock; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.InternalSettingsPlugin; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import 
static java.util.Collections.emptyMap;
+import static java.util.Collections.emptySet;
+import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
+import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+
+public class IndexShardIT extends ESSingleNodeTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return pluginList(InternalSettingsPlugin.class);
+    }
+
+    private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long timestamp, long ttl,
+                                              ParseContext.Document document, BytesReference source, Mapping mappingUpdate) {
+        Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE);
+        Field versionField = new NumericDocValuesField("_version", 0);
+        document.add(uidField);
+        document.add(versionField);
+        return new ParsedDocument(versionField, id, type, routing, timestamp, ttl, Collections.singletonList(document), source,
+            mappingUpdate);
+    }
+
+    public void testLockTryingToDelete() throws Exception {
+        createIndex("test");
+        ensureGreen();
+        NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
+
+        ClusterService cs = getInstanceFromNode(ClusterService.class);
+        final Index index = cs.state().metaData().index("test").getIndex();
+        Path[] shardPaths = env.availableShardPaths(new ShardId(index, 0));
+        logger.info("--> paths: [{}]", (Object)shardPaths);
+        // Should not be able to acquire the lock because it's already open
+        try {
+            NodeEnvironment.acquireFSLockForPaths(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), shardPaths);
+            fail("should not have been able to acquire the lock");
+        } catch (LockObtainFailedException e) {
+            assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
+        }
+        // Test without the regular shard lock to assume we can acquire it
+        // (worst case, meaning that the shard lock could be acquired and
+        // we're green to delete the shard's directory)
+        ShardLock sLock = new DummyShardLock(new ShardId(index, 0));
+        try {
+            env.deleteShardDirectoryUnderLock(sLock, IndexSettingsModule.newIndexSettings("test", Settings.EMPTY));
+            fail("should not have been able to delete the directory");
+        } catch (LockObtainFailedException e) {
+            assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
+        }
+    }
+
+    public void testMarkAsInactiveTriggersSyncedFlush() throws Exception {
+        assertAcked(client().admin().indices().prepareCreate("test")
+            .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
+        client().prepareIndex("test", "test").setSource("{}").get();
+        ensureGreen("test");
+        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
+        assertBusy(() -> {
+            IndexStats indexStats = client().admin().indices().prepareStats("test").clear().get().getIndex("test");
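+            // marking the shard as idle triggers a synced flush, which stores a sync_id
+            // in the Lucene commit's user data; that marker is what is asserted on below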
assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); + indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0); + } + ); + IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); + assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); + } + + public void testDurableFlagHasEffect() { + createIndex("test"); + ensureGreen(); + client().prepareIndex("test", "bar", "1").setSource("{}").get(); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService test = indicesService.indexService(resolveIndex("test")); + IndexShard shard = test.getShardOrNull(0); + setDurability(shard, Translog.Durability.REQUEST); + assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded()); + setDurability(shard, Translog.Durability.ASYNC); + client().prepareIndex("test", "bar", "2").setSource("{}").get(); + assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded()); + setDurability(shard, Translog.Durability.REQUEST); + client().prepareDelete("test", "bar", "1").get(); + assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded()); + + setDurability(shard, Translog.Durability.ASYNC); + client().prepareDelete("test", "bar", "2").get(); + assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded()); + setDurability(shard, Translog.Durability.REQUEST); + assertNoFailures(client().prepareBulk() + .add(client().prepareIndex("test", "bar", "3").setSource("{}")) + .add(client().prepareDelete("test", "bar", "1")).get()); + assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded()); + + setDurability(shard, Translog.Durability.ASYNC); + assertNoFailures(client().prepareBulk() + .add(client().prepareIndex("test", "bar", "4").setSource("{}")) + .add(client().prepareDelete("test", "bar", "3")).get()); + setDurability(shard, Translog.Durability.REQUEST); + assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded()); + } + + private void setDurability(IndexShard shard, Translog.Durability durability) { + client().admin().indices().prepareUpdateSettings(shard.shardId().getIndexName()).setSettings( + Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability.name()).build()).get(); + assertEquals(durability, shard.getTranslogDurability()); + } + + public void testUpdatePriority() { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(IndexMetaData.SETTING_PRIORITY, 200)); + IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); + assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); + client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400) + .build()).get(); + assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); + } + + public void testIndexDirIsDeletedWhenShardRemoved() throws Exception { + Environment env = getInstanceFromNode(Environment.class); + Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10)); + logger.info("--> idxPath: [{}]", idxPath); + Settings idxSettings = Settings.builder() + .put(IndexMetaData.SETTING_DATA_PATH, idxPath) + .build(); + createIndex("test", idxSettings); + ensureGreen("test"); + 
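+        // index one document so the custom data path contains real shard data before
+        // the index is deleted and the path is checked for cleanup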
client().prepareIndex("test", "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get(); + SearchResponse response = client().prepareSearch("test").get(); + assertHitCount(response, 1L); + client().admin().indices().prepareDelete("test").get(); + assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class))); + assertPathHasBeenCleared(idxPath); + } + + public void testExpectedShardSizeIsPresent() throws InterruptedException { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)); + for (int i = 0; i < 50; i++) { + client().prepareIndex("test", "test").setSource("{}").get(); + } + ensureGreen("test"); + InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class); + clusterInfoService.refresh(); + ClusterState state = getInstanceFromNode(ClusterService.class).state(); + Long test = clusterInfoService.getClusterInfo().getShardSize(state.getRoutingTable().index("test") + .getShards().get(0).primaryShard()); + assertNotNull(test); + assertTrue(test > 0); + } + + public void testIndexCanChangeCustomDataPath() throws Exception { + Environment env = getInstanceFromNode(Environment.class); + Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10)); + final String INDEX = "idx"; + Path startDir = idxPath.resolve("start-" + randomAsciiOfLength(10)); + Path endDir = idxPath.resolve("end-" + randomAsciiOfLength(10)); + logger.info("--> start dir: [{}]", startDir.toAbsolutePath().toString()); + logger.info("--> end dir: [{}]", endDir.toAbsolutePath().toString()); + // temp dirs are automatically created, but the end dir is what + // startDir is going to be renamed as, so it needs to be deleted + // otherwise we get all sorts of errors about the directory + // already existing + IOUtils.rm(endDir); + + Settings sb = Settings.builder() + .put(IndexMetaData.SETTING_DATA_PATH, startDir.toAbsolutePath().toString()) + .build(); + Settings sb2 = Settings.builder() + .put(IndexMetaData.SETTING_DATA_PATH, endDir.toAbsolutePath().toString()) + .build(); + + logger.info("--> creating an index with data_path [{}]", startDir.toAbsolutePath().toString()); + createIndex(INDEX, sb); + ensureGreen(INDEX); + client().prepareIndex(INDEX, "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get(); + + SearchResponse resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get(); + assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L)); + + logger.info("--> closing the index [{}]", INDEX); + client().admin().indices().prepareClose(INDEX).get(); + logger.info("--> index closed, re-opening..."); + client().admin().indices().prepareOpen(INDEX).get(); + logger.info("--> index re-opened"); + ensureGreen(INDEX); + + resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get(); + assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L)); + + // Now, try closing and changing the settings + + logger.info("--> closing the index [{}]", INDEX); + client().admin().indices().prepareClose(INDEX).get(); + + logger.info("--> moving data on disk [{}] to [{}]", startDir.getFileName(), endDir.getFileName()); + assert Files.exists(endDir) == false : "end directory should not exist!"; + Files.move(startDir, endDir, StandardCopyOption.REPLACE_EXISTING); + + logger.info("--> updating settings..."); + client().admin().indices().prepareUpdateSettings(INDEX) + .setSettings(sb2) + 
.setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true)) + .get(); + + assert Files.exists(startDir) == false : "start dir shouldn't exist"; + + logger.info("--> settings updated and files moved, re-opening index"); + client().admin().indices().prepareOpen(INDEX).get(); + logger.info("--> index re-opened"); + ensureGreen(INDEX); + + resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get(); + assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L)); + + assertAcked(client().admin().indices().prepareDelete(INDEX)); + assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class))); + assertPathHasBeenCleared(startDir.toAbsolutePath()); + assertPathHasBeenCleared(endDir.toAbsolutePath()); + } + + public void testMaybeFlush() throws Exception { + createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST) + .build()); + ensureGreen(); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService test = indicesService.indexService(resolveIndex("test")); + IndexShard shard = test.getShardOrNull(0); + assertFalse(shard.shouldFlush()); + client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), + new ByteSizeValue(133 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); + client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); + assertFalse(shard.shouldFlush()); + ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(), + new BytesArray(new byte[]{1}), null); + Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc); + shard.index(index); + assertTrue(shard.shouldFlush()); + assertEquals(2, shard.getEngine().getTranslog().totalOperations()); + client().prepareIndex("test", "test", "2").setSource("{}").setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE).get(); + assertBusy(() -> { // this is async + assertFalse(shard.shouldFlush()); + }); + assertEquals(0, shard.getEngine().getTranslog().totalOperations()); + shard.getEngine().getTranslog().sync(); + long size = shard.getEngine().getTranslog().sizeInBytes(); + logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), + shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration()); + client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put( + IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES)) + .build()).get(); + client().prepareDelete("test", "test", "2").get(); + logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), + shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration()); + assertBusy(() -> { // this is async + logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), + shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration()); + assertFalse(shard.shouldFlush()); + }); + assertEquals(0, shard.getEngine().getTranslog().totalOperations()); + } + + public void testStressMaybeFlush() throws Exception { + createIndex("test"); + ensureGreen(); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService test = indicesService.indexService(resolveIndex("test")); + final IndexShard shard = test.getShardOrNull(0); + assertFalse(shard.shouldFlush()); + client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put( + IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), + new ByteSizeValue(133/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); + client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE).get(); + assertFalse(shard.shouldFlush()); + final AtomicBoolean running = new AtomicBoolean(true); + final int numThreads = randomIntBetween(2, 4); + Thread[] threads = new Thread[numThreads]; + CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread() { + @Override + public void run() { + try { + barrier.await(); + } catch (InterruptedException | BrokenBarrierException e) { + throw new RuntimeException(e); + } + while (running.get()) { + shard.maybeFlush(); + } + } + }; + threads[i].start(); + } + barrier.await(); + FlushStats flushStats = shard.flushStats(); + long total = flushStats.getTotal(); + client().prepareIndex("test", "test", "1").setSource("{}").get(); + assertBusy(() -> assertEquals(total + 1, shard.flushStats().getTotal())); + running.set(false); + for (int i = 0; i < threads.length; i++) { + threads[i].join(); + } + assertEquals(total + 1, shard.flushStats().getTotal()); + } + + public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable { + createIndex("test"); + ensureGreen(); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexService(resolveIndex("test")); + IndexShard shard = indexService.getShardOrNull(0); + client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").get(); + client().prepareDelete("test", "test", "0").get(); + client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get(); + + IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {}; + shard.close("simon says", false); + AtomicReference shardRef = new AtomicReference<>(); + List failures = new ArrayList<>(); + IndexingOperationListener listener = new IndexingOperationListener() { + + @Override + public void postIndex(Engine.Index index, boolean created) { + try { + assertNotNull(shardRef.get()); + // this is all IMC needs to do - check current memory and refresh + assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0); + shardRef.get().refresh("test"); + } catch (Exception e) { + failures.add(e); + throw e; + } + } + + + @Override + public void postDelete(Engine.Delete delete) { + try { + assertNotNull(shardRef.get()); + // this is all IMC needs to do - check current memory and refresh + assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0); + shardRef.get().refresh("test"); + } catch (Exception e) { + failures.add(e); + throw e; + } + } + }; + final IndexShard newShard = newIndexShard(indexService, shard, wrapper, listener); + shardRef.set(newShard); + recoverShard(newShard); + + try { + ExceptionsHelper.rethrowAndSuppress(failures); + } finally { + newShard.close("just do it", randomBoolean()); + } + } + + + public static final IndexShard recoverShard(IndexShard newShard) throws IOException { + DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); + assertTrue(newShard.recoverFromStore()); + newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); + return newShard; + } + + public static final IndexShard newIndexShard(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper, + IndexingOperationListener... 
listeners) throws IOException { + ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry()); + IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(), + shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), + indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, + indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners)); + return newShard; + } + + private static ShardRouting getInitializingShardRouting(ShardRouting existingShardRouting) { + ShardRouting shardRouting = TestShardRouting.newShardRouting(existingShardRouting.shardId(), + existingShardRouting.currentNodeId(), null, existingShardRouting.primary(), ShardRoutingState.INITIALIZING, + existingShardRouting.allocationId()); + shardRouting = shardRouting.updateUnassigned(new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "fake recovery"), + RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE); + return shardRouting; + } +} diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index b4725a8506d..c0375b2f98b 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -30,45 +30,28 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.Constants; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; -import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.TransportIndexAction; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.ClusterInfoService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.AllocationId; -import org.elasticsearch.cluster.routing.RecoverySource.LocalShardsRecoverySource; -import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; -import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource; +import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import 
org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -76,21 +59,13 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.env.ShardLock; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParseContext; @@ -101,9 +76,9 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogTests; -import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.indices.recovery.RecoveryTarget; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -112,20 +87,14 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotShardFailure; import org.elasticsearch.test.DummyShardLock; -import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.FieldMaskingReader; -import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -140,38 +109,23 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import 
java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; import static org.elasticsearch.common.lucene.Lucene.cleanLuceneIndex; import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; /** * Simple unit-test IndexShard related operations. */ -public class IndexShardTests extends ESSingleNodeTestCase { - - @Override - protected Collection> getPlugins() { - return pluginList(InternalSettingsPlugin.class); - } +public class IndexShardTests extends IndexShardTestCase { public void testWriteShardState() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { @@ -197,73 +151,41 @@ public class IndexShardTests extends ESSingleNodeTestCase { } } - public void testLockTryingToDelete() throws Exception { - createIndex("test"); - ensureGreen(); - NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); - - ClusterService cs = getInstanceFromNode(ClusterService.class); - final Index index = cs.state().metaData().index("test").getIndex(); - Path[] shardPaths = env.availableShardPaths(new ShardId(index, 0)); - logger.info("--> paths: [{}]", (Object)shardPaths); - // Should not be able to acquire the lock because it's already open - try { - NodeEnvironment.acquireFSLockForPaths(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), shardPaths); - fail("should not have been able to acquire the lock"); - } catch (LockObtainFailedException e) { - assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock")); - } - // Test without the regular shard lock to assume we can acquire it - // (worst case, meaning that the shard lock could be acquired and - // we're green to delete the shard's directory) - ShardLock sLock = new DummyShardLock(new ShardId(index, 0)); - try { - env.deleteShardDirectoryUnderLock(sLock, IndexSettingsModule.newIndexSettings("test", Settings.EMPTY)); - fail("should not have been able to delete the directory"); - } catch (LockObtainFailedException e) { - assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock")); - } - } - public void testPersistenceStateMetadataPersistence() throws Exception { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - 
NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); + IndexShard shard = newStartedShard(); + final Path shardStatePath = shard.shardPath().getShardStatePath(); + ShardStateMetaData shardStateMetaData = load(logger, shardStatePath); assertEquals(getShardStateMetadata(shard), shardStateMetaData); ShardRouting routing = shard.shardRouting; shard.updateRoutingEntry(routing); - shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); + shardStateMetaData = load(logger, shardStatePath); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); - assertEquals(shardStateMetaData, new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId())); + assertEquals(shardStateMetaData, + new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId())); routing = TestShardRouting.relocate(shard.shardRouting, "some node", 42L); shard.updateRoutingEntry(routing); - shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); + shardStateMetaData = load(logger, shardStatePath); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); - assertEquals(shardStateMetaData, new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId())); + assertEquals(shardStateMetaData, + new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId())); + closeShards(shard); } public void testFailShard() throws Exception { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); + IndexShard shard = newStartedShard(); + final ShardPath shardPath = shard.shardPath(); + assertNotNull(shardPath); // fail shard shard.failShard("test shard fail", new CorruptIndexException("", "")); + closeShards(shard); // check state file still exists - ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); + ShardStateMetaData shardStateMetaData = load(logger, shardPath.getShardStatePath()); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); - ShardPath shardPath = ShardPath.loadShardPath(logger, env, shard.shardId(), test.getIndexSettings()); - assertNotNull(shardPath); // but index can't be opened for a failed shard - assertThat("store index should be corrupted", Store.canOpenIndex(logger, shardPath.resolveIndex(), shard.shardId(), env::shardLock), + assertThat("store index should be corrupted", Store.canOpenIndex(logger, shardPath.resolveIndex(), shard.shardId(), + (shardId, lockTimeoutMS) -> new DummyShardLock(shardId)), equalTo(false)); } @@ -286,10 +208,12 @@ public class IndexShardTests extends ESSingleNodeTestCase { public void testShardStateMetaHashCodeEquals() { AllocationId allocationId = randomBoolean() ? 
null : randomAllocationId(); - ShardStateMetaData meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId); + ShardStateMetaData meta = new ShardStateMetaData(randomLong(), randomBoolean(), + randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId); assertEquals(meta, new ShardStateMetaData(meta.legacyVersion, meta.primary, meta.indexUUID, meta.allocationId)); - assertEquals(meta.hashCode(), new ShardStateMetaData(meta.legacyVersion, meta.primary, meta.indexUUID, meta.allocationId).hashCode()); + assertEquals(meta.hashCode(), + new ShardStateMetaData(meta.legacyVersion, meta.primary, meta.indexUUID, meta.allocationId).hashCode()); assertFalse(meta.equals(new ShardStateMetaData(meta.legacyVersion, !meta.primary, meta.indexUUID, meta.allocationId))); assertFalse(meta.equals(new ShardStateMetaData(meta.legacyVersion + 1, meta.primary, meta.indexUUID, meta.allocationId))); @@ -298,20 +222,17 @@ public class IndexShardTests extends ESSingleNodeTestCase { Set hashCodes = new HashSet<>(); for (int i = 0; i < 30; i++) { // just a sanity check that we impl hashcode allocationId = randomBoolean() ? null : randomAllocationId(); - meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId); + meta = new ShardStateMetaData(randomLong(), randomBoolean(), + randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId); hashCodes.add(meta.hashCode()); } assertTrue("more than one unique hashcode expected but got: " + hashCodes.size(), hashCodes.size() > 1); } - public void testDeleteIndexPreventsNewOperations() throws InterruptedException, ExecutionException, IOException { - assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); - ensureGreen("test"); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test")); - IndexShard indexShard = indexService.getShardOrNull(0); - client().admin().indices().prepareDelete("test").get(); + public void testClosesPreventsNewOperations() throws InterruptedException, ExecutionException, IOException { + IndexShard indexShard = newStartedShard(); + closeShards(indexShard); assertThat(indexShard.getActiveOperationsCount(), equalTo(0)); try { indexShard.acquirePrimaryOperationLock(null, ThreadPool.Names.INDEX); @@ -328,35 +249,27 @@ public class IndexShardTests extends ESSingleNodeTestCase { } public void testOperationLocksOnPrimaryShards() throws InterruptedException, ExecutionException, IOException { - assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); - ensureGreen("test"); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test")); - IndexShard indexShard = indexService.getShardOrNull(0); - long primaryTerm = indexShard.getPrimaryTerm(); + final ShardId shardId = new ShardId("test", "_na_", 0); + final IndexShard indexShard; - ShardRouting temp = indexShard.routingEntry(); - final ShardRouting newPrimaryShardRouting; if (randomBoolean()) { // relocation target - newPrimaryShardRouting = TestShardRouting.newShardRouting(temp.shardId(), temp.currentNodeId(), "other node", - true, 
ShardRoutingState.INITIALIZING, AllocationId.newRelocation(temp.allocationId())); + indexShard = newShard(TestShardRouting.newShardRouting(shardId, "local_node", "other node", + true, ShardRoutingState.INITIALIZING, AllocationId.newRelocation(AllocationId.newInitializing()))); } else if (randomBoolean()) { // simulate promotion - ShardRouting newReplicaShardRouting = TestShardRouting.newShardRouting(temp.shardId(), temp.currentNodeId(), null, - false, ShardRoutingState.STARTED, temp.allocationId()); - indexShard.updateRoutingEntry(newReplicaShardRouting); - primaryTerm = primaryTerm + 1; - indexShard.updatePrimaryTerm(primaryTerm); - newPrimaryShardRouting = TestShardRouting.newShardRouting(temp.shardId(), temp.currentNodeId(), null, - true, ShardRoutingState.STARTED, temp.allocationId()); + indexShard = newShard(shardId, false); + ShardRouting replicaRouting = indexShard.routingEntry(); + indexShard.updatePrimaryTerm(indexShard.getPrimaryTerm() + 1); + ShardRouting primaryRouting = TestShardRouting.newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), null, + true, ShardRoutingState.STARTED, replicaRouting.allocationId()); + indexShard.updateRoutingEntry(primaryRouting); } else { - newPrimaryShardRouting = temp; + indexShard = newStartedShard(true); } - indexShard.updateRoutingEntry(newPrimaryShardRouting); - + final long primaryTerm = indexShard.getPrimaryTerm(); assertEquals(0, indexShard.getActiveOperationsCount()); - if (newPrimaryShardRouting.isRelocationTarget() == false) { + if (indexShard.routingEntry().isRelocationTarget() == false) { try { indexShard.acquireReplicaOperationLock(primaryTerm, null, ThreadPool.Names.INDEX); fail("shard shouldn't accept operations as replica"); @@ -371,6 +284,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { Releasables.close(operation1, operation2); assertEquals(0, indexShard.getActiveOperationsCount()); + + closeShards(indexShard); } private Releasable acquirePrimaryOperationLockBlockingly(IndexShard indexShard) throws ExecutionException, InterruptedException { @@ -379,56 +294,52 @@ public class IndexShardTests extends ESSingleNodeTestCase { return fut.get(); } - private Releasable acquireReplicaOperationLockBlockingly(IndexShard indexShard, long opPrimaryTerm) throws ExecutionException, InterruptedException { + private Releasable acquireReplicaOperationLockBlockingly(IndexShard indexShard, long opPrimaryTerm) + throws ExecutionException, InterruptedException { PlainActionFuture fut = new PlainActionFuture<>(); indexShard.acquireReplicaOperationLock(opPrimaryTerm, fut, ThreadPool.Names.INDEX); return fut.get(); } public void testOperationLocksOnReplicaShards() throws InterruptedException, ExecutionException, IOException { - assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); - ensureGreen("test"); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test")); - IndexShard indexShard = indexService.getShardOrNull(0); - long primaryTerm = indexShard.getPrimaryTerm(); + final ShardId shardId = new ShardId("test", "_na_", 0); + final IndexShard indexShard; - // ugly hack to allow the shard to operated as a replica - final ShardRouting temp = indexShard.routingEntry(); - final ShardRouting newShardRouting; switch (randomInt(2)) { case 0: // started replica - newShardRouting = 
TestShardRouting.newShardRouting(temp.shardId(), temp.currentNodeId(), null, - false, ShardRoutingState.STARTED, AllocationId.newRelocation(temp.allocationId())); - - indexShard.updateRoutingEntry(newShardRouting); + indexShard = newStartedShard(false); break; - case 1: + case 1: { // initializing replica / primary final boolean relocating = randomBoolean(); - newShardRouting = TestShardRouting.newShardRouting(temp.shardId(), temp.currentNodeId(), + ShardRouting routing = TestShardRouting.newShardRouting(shardId, "local_node", relocating ? "sourceNode" : null, relocating ? randomBoolean() : false, ShardRoutingState.INITIALIZING, - relocating ? AllocationId.newRelocation(temp.allocationId()) : temp.allocationId()); - indexShard.updateRoutingEntry(newShardRouting); + relocating ? AllocationId.newRelocation(AllocationId.newInitializing()) : AllocationId.newInitializing()); + indexShard = newShard(routing); break; - case 2: + } + case 2: { // relocation source - newShardRouting = TestShardRouting.newShardRouting(temp.shardId(), temp.currentNodeId(), "otherNode", - false, ShardRoutingState.RELOCATING, AllocationId.newRelocation(temp.allocationId())); - indexShard.updateRoutingEntry(newShardRouting); + indexShard = newStartedShard(false); + ShardRouting routing = indexShard.routingEntry(); + routing = TestShardRouting.newShardRouting(routing.shardId(), routing.currentNodeId(), "otherNode", + false, ShardRoutingState.RELOCATING, AllocationId.newRelocation(routing.allocationId())); + indexShard.updateRoutingEntry(routing); indexShard.relocated("test"); break; + } default: throw new UnsupportedOperationException("get your numbers straight"); } - logger.info("updated shard routing to {}", newShardRouting); + final ShardRouting shardRouting = indexShard.routingEntry(); + logger.info("shard routing to {}", shardRouting); assertEquals(0, indexShard.getActiveOperationsCount()); - if (newShardRouting.primary() == false) { + if (shardRouting.primary() == false) { try { indexShard.acquirePrimaryOperationLock(null, ThreadPool.Names.INDEX); fail("shard shouldn't accept primary ops"); @@ -437,6 +348,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { } } + final long primaryTerm = indexShard.getPrimaryTerm(); + Releasable operation1 = acquireReplicaOperationLockBlockingly(indexShard, primaryTerm); assertEquals(1, indexShard.getActiveOperationsCount()); Releasable operation2 = acquireReplicaOperationLockBlockingly(indexShard, primaryTerm); @@ -454,23 +367,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { acquireReplicaOperationLockBlockingly(indexShard, primaryTerm + 1 + randomInt(20)).close(); Releasables.close(operation1, operation2); assertEquals(0, indexShard.getActiveOperationsCount()); - } - public void testMarkAsInactiveTriggersSyncedFlush() throws Exception { - assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)); - client().prepareIndex("test", "test").setSource("{}").get(); - ensureGreen("test"); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0); - assertBusy(() -> { - IndexStats indexStats = client().admin().indices().prepareStats("test").clear().get().getIndex("test"); - assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0); - } - ); - IndexStats 
indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); + closeShards(indexShard); } public static ShardStateMetaData load(Logger logger, Path... shardPaths) throws IOException { @@ -483,47 +381,40 @@ public class IndexShardTests extends ESSingleNodeTestCase { } public void testAcquireIndexCommit() throws IOException { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); + final IndexShard shard = newStartedShard(); int numDocs = randomInt(20); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test", "type", "id_" + i).setSource("{}").get(); + indexDoc(shard, "type", "id_" + i); } final boolean flushFirst = randomBoolean(); IndexCommit commit = shard.acquireIndexCommit(flushFirst); int moreDocs = randomInt(20); for (int i = 0; i < moreDocs; i++) { - client().prepareIndex("test", "type", "id_" + numDocs + i).setSource("{}").get(); + indexDoc(shard, "type", "id_" + numDocs + i); } - shard.flush(new FlushRequest("index")); + flushShard(shard); // check that we can still read the commit that we captured try (IndexReader reader = DirectoryReader.open(commit)) { assertThat(reader.numDocs(), equalTo(flushFirst ? numDocs : 0)); } shard.releaseIndexCommit(commit); - shard.flush(new FlushRequest("index").force(true)); + flushShard(shard, true); + // check it's clean up assertThat(DirectoryReader.listCommits(shard.store().directory()), hasSize(1)); + + closeShards(shard); } /*** * test one can snapshot the store at various lifecycle stages */ public void testSnapshotStore() throws IOException { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); - client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE).get(); - client().admin().indices().prepareFlush().get(); - ShardRouting routing = shard.routingEntry(); - test.removeShard(0, "b/c simon says so"); - routing = routing.reinitializePrimaryShard(); - IndexShard newShard = test.createShard(routing); + final IndexShard shard = newStartedShard(true); + indexDoc(shard, "test", "0"); + flushShard(shard); + + final IndexShard newShard = reinitShard(shard); DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); Store.MetadataSnapshot snapshot = newShard.snapshotStoreMetadata(); @@ -539,7 +430,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { snapshot = newShard.snapshotStoreMetadata(); assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); - newShard.updateRoutingEntry(getInitializingShardRouting(routing).moveToStarted()); + newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); snapshot = newShard.snapshotStoreMetadata(); assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); @@ -548,48 +439,13 @@ public class IndexShardTests extends ESSingleNodeTestCase { snapshot = newShard.snapshotStoreMetadata(); assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); + + closeShards(newShard); } - public void testDurableFlagHasEffect() { - createIndex("test"); - ensureGreen(); - client().prepareIndex("test", "bar", "1").setSource("{}").get(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - setDurability(shard, Translog.Durability.REQUEST); - assertFalse(shard.getEngine().getTranslog().syncNeeded()); - setDurability(shard, Translog.Durability.ASYNC); - client().prepareIndex("test", "bar", "2").setSource("{}").get(); - assertTrue(shard.getEngine().getTranslog().syncNeeded()); - setDurability(shard, Translog.Durability.REQUEST); - client().prepareDelete("test", "bar", "1").get(); - assertFalse(shard.getEngine().getTranslog().syncNeeded()); - setDurability(shard, Translog.Durability.ASYNC); - client().prepareDelete("test", "bar", "2").get(); - assertTrue(shard.getEngine().getTranslog().syncNeeded()); - setDurability(shard, Translog.Durability.REQUEST); - assertNoFailures(client().prepareBulk() - .add(client().prepareIndex("test", "bar", "3").setSource("{}")) - .add(client().prepareDelete("test", "bar", "1")).get()); - assertFalse(shard.getEngine().getTranslog().syncNeeded()); - - setDurability(shard, Translog.Durability.ASYNC); - assertNoFailures(client().prepareBulk() - .add(client().prepareIndex("test", "bar", "4").setSource("{}")) - .add(client().prepareDelete("test", "bar", "3")).get()); - setDurability(shard, Translog.Durability.REQUEST); - assertTrue(shard.getEngine().getTranslog().syncNeeded()); - } - - public void testAsyncFsync() throws InterruptedException { - createIndex("test"); - ensureGreen(); - client().prepareIndex("test", "bar", "1").setSource("{}").get(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); + public void testAsyncFsync() throws InterruptedException, IOException { + IndexShard shard = newStartedShard(); Semaphore semaphore = new Semaphore(Integer.MAX_VALUE); Thread[] thread = new Thread[randomIntBetween(3, 5)]; CountDownLatch latch = new CountDownLatch(thread.length); @@ -607,7 
+463,9 @@ public class IndexShardTests extends ESSingleNodeTestCase { } catch (Exception ex) { throw new RuntimeException(ex); } - }; + } + + ; }; thread[i].start(); } @@ -616,170 +474,37 @@ public class IndexShardTests extends ESSingleNodeTestCase { thread[i].join(); } assertTrue(semaphore.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS)); + + closeShards(shard); } - private void setDurability(IndexShard shard, Translog.Durability durability) { - client().admin().indices().prepareUpdateSettings(shard.shardId.getIndexName()).setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability.name()).build()).get(); - assertEquals(durability, shard.getTranslogDurability()); - } - - public void testMinimumCompatVersion() { + public void testMinimumCompatVersion() throws IOException { Version versionCreated = VersionUtils.randomVersion(random()); - assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0, SETTING_VERSION_CREATED, versionCreated.id)); - client().prepareIndex("test", "test").setSource("{}").get(); - ensureGreen("test"); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexShard test = indicesService.indexService(resolveIndex("test")).getShardOrNull(0); + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, versionCreated.id) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData metaData = IndexMetaData.builder("test") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard test = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoveryShardFromStore(test); + + indexDoc(test, "test", "test"); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); - client().prepareIndex("test", "test").setSource("{}").get(); + indexDoc(test, "test", "test"); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); test.getEngine().flush(); assertEquals(Version.CURRENT.luceneVersion, test.minimumCompatibleVersion()); + + closeShards(test); } - public void testUpdatePriority() { - assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(IndexMetaData.SETTING_PRIORITY, 200)); - IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400).build()).get(); - assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); - } - - public void testRecoverIntoLeftover() throws IOException { - createIndex("test"); - ensureGreen("test"); - client().prepareIndex("test", "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get(); - client().admin().indices().prepareFlush("test").get(); - SearchResponse response = client().prepareSearch("test").get(); - assertHitCount(response, 1L); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - ShardPath shardPath = shard.shardPath(); - Path dataPath = shardPath.getDataPath(); - 
client().admin().indices().prepareClose("test").get(); - Path tempDir = createTempDir(); - Files.move(dataPath, tempDir.resolve("test")); - client().admin().indices().prepareDelete("test").get(); - Files.createDirectories(dataPath.getParent()); - Files.move(tempDir.resolve("test"), dataPath); - createIndex("test"); - ensureGreen("test"); - response = client().prepareSearch("test").get(); - assertHitCount(response, 0L); - } - - public void testIndexDirIsDeletedWhenShardRemoved() throws Exception { - Environment env = getInstanceFromNode(Environment.class); - Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10)); - logger.info("--> idxPath: [{}]", idxPath); - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_DATA_PATH, idxPath) - .build(); - createIndex("test", idxSettings); - ensureGreen("test"); - client().prepareIndex("test", "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get(); - SearchResponse response = client().prepareSearch("test").get(); - assertHitCount(response, 1L); - client().admin().indices().prepareDelete("test").get(); - assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class))); - assertPathHasBeenCleared(idxPath); - } - - public void testExpectedShardSizeIsPresent() throws InterruptedException { - assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)); - for (int i = 0; i < 50; i++) { - client().prepareIndex("test", "test").setSource("{}").get(); - } - ensureGreen("test"); - InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class); - clusterInfoService.refresh(); - ClusterState state = getInstanceFromNode(ClusterService.class).state(); - Long test = clusterInfoService.getClusterInfo().getShardSize(state.getRoutingTable().index("test").getShards().get(0).primaryShard()); - assertNotNull(test); - assertTrue(test > 0); - } - - public void testIndexCanChangeCustomDataPath() throws Exception { - Environment env = getInstanceFromNode(Environment.class); - Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10)); - final String INDEX = "idx"; - Path startDir = idxPath.resolve("start-" + randomAsciiOfLength(10)); - Path endDir = idxPath.resolve("end-" + randomAsciiOfLength(10)); - logger.info("--> start dir: [{}]", startDir.toAbsolutePath().toString()); - logger.info("--> end dir: [{}]", endDir.toAbsolutePath().toString()); - // temp dirs are automatically created, but the end dir is what - // startDir is going to be renamed as, so it needs to be deleted - // otherwise we get all sorts of errors about the directory - // already existing - IOUtils.rm(endDir); - - Settings sb = Settings.builder() - .put(IndexMetaData.SETTING_DATA_PATH, startDir.toAbsolutePath().toString()) - .build(); - Settings sb2 = Settings.builder() - .put(IndexMetaData.SETTING_DATA_PATH, endDir.toAbsolutePath().toString()) - .build(); - - logger.info("--> creating an index with data_path [{}]", startDir.toAbsolutePath().toString()); - createIndex(INDEX, sb); - ensureGreen(INDEX); - client().prepareIndex(INDEX, "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get(); - - SearchResponse resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get(); - assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L)); - - logger.info("--> closing the index [{}]", INDEX); - client().admin().indices().prepareClose(INDEX).get(); - logger.info("--> 
index closed, re-opening..."); - client().admin().indices().prepareOpen(INDEX).get(); - logger.info("--> index re-opened"); - ensureGreen(INDEX); - - resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get(); - assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L)); - - // Now, try closing and changing the settings - - logger.info("--> closing the index [{}]", INDEX); - client().admin().indices().prepareClose(INDEX).get(); - - logger.info("--> moving data on disk [{}] to [{}]", startDir.getFileName(), endDir.getFileName()); - assert Files.exists(endDir) == false : "end directory should not exist!"; - Files.move(startDir, endDir, StandardCopyOption.REPLACE_EXISTING); - - logger.info("--> updating settings..."); - client().admin().indices().prepareUpdateSettings(INDEX) - .setSettings(sb2) - .setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true)) - .get(); - - assert Files.exists(startDir) == false : "start dir shouldn't exist"; - - logger.info("--> settings updated and files moved, re-opening index"); - client().admin().indices().prepareOpen(INDEX).get(); - logger.info("--> index re-opened"); - ensureGreen(INDEX); - - resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get(); - assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L)); - - assertAcked(client().admin().indices().prepareDelete(INDEX)); - assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class))); - assertPathHasBeenCleared(startDir.toAbsolutePath()); - assertPathHasBeenCleared(endDir.toAbsolutePath()); - } public void testShardStats() throws IOException { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), shard, new CommonStatsFlags()), shard.commitStats()); + IndexShard shard = newStartedShard(); + ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), + new CommonStats(new IndicesQueryCache(Settings.EMPTY), shard, new CommonStatsFlags()), shard.commitStats()); assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath()); assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath()); assertEquals(shard.shardPath().isCustomDataPath(), stats.isCustomDataPath()); @@ -790,7 +515,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { StreamInput in = out.bytes().streamInput(); stats = ShardStats.readShardStats(in); } - XContentBuilder builder = XContentFactory.jsonBuilder(); + XContentBuilder builder = jsonBuilder(); builder.startObject(); stats.toXContent(builder, EMPTY_PARAMS); builder.endObject(); @@ -802,9 +527,12 @@ public class IndexShardTests extends ESSingleNodeTestCase { expectedSubSequence.append("\",\"is_custom_data_path\":").append(shard.shardPath().isCustomDataPath()).append("}"); assumeFalse("Some path weirdness on windows", Constants.WINDOWS); assertTrue(xContent.contains(expectedSubSequence)); + + closeShards(shard); } - private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long timestamp, long ttl, ParseContext.Document document, BytesReference source, Mapping mappingUpdate) { + private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long 
timestamp, long ttl, + ParseContext.Document document, BytesReference source, Mapping mappingUpdate) { Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE); Field versionField = new NumericDocValuesField("_version", 0); document.add(uidField); @@ -813,12 +541,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { } public void testIndexingOperationsListeners() throws IOException { - createIndex("test_iol"); - ensureGreen(); - client().prepareIndex("test_iol", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test_iol")); - IndexShard shard = test.getShardOrNull(0); + IndexShard shard = newStartedShard(true); + indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); AtomicInteger preIndex = new AtomicInteger(); AtomicInteger postIndexCreate = new AtomicInteger(); AtomicInteger postIndexUpdate = new AtomicInteger(); @@ -827,7 +551,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { AtomicInteger postDelete = new AtomicInteger(); AtomicInteger postDeleteException = new AtomicInteger(); shard.close("simon says", true); - shard = reinitWithWrapper(test, shard, null, new IndexingOperationListener() { + shard = reinitShard(shard, new IndexingOperationListener() { @Override public Engine.Index preIndex(Engine.Index operation) { preIndex.incrementAndGet(); @@ -836,7 +560,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { @Override public void postIndex(Engine.Index index, boolean created) { - if(created) { + if (created) { postIndexCreate.incrementAndGet(); } else { postIndexUpdate.incrementAndGet(); @@ -865,8 +589,10 @@ public class IndexShardTests extends ESSingleNodeTestCase { } }); + recoveryShardFromStore(shard); - ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(), new BytesArray(new byte[]{1}), null); + ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(), + new BytesArray(new byte[]{1}), null); Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc); shard.index(index); assertEquals(1, preIndex.get()); @@ -928,95 +654,12 @@ public class IndexShardTests extends ESSingleNodeTestCase { assertEquals(1, preDelete.get()); assertEquals(1, postDelete.get()); assertEquals(0, postDeleteException.get()); - } - public void testMaybeFlush() throws Exception { - createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST).build()); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - assertFalse(shard.shouldFlush()); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); - client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE).get(); - assertFalse(shard.shouldFlush()); - ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(), new BytesArray(new byte[]{1}), null); - Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc); - shard.index(index); - assertTrue(shard.shouldFlush()); - assertEquals(2, shard.getEngine().getTranslog().totalOperations()); - client().prepareIndex("test", "test", "2").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); - assertBusy(() -> { // this is async - assertFalse(shard.shouldFlush()); - }); - assertEquals(0, shard.getEngine().getTranslog().totalOperations()); - shard.getEngine().getTranslog().sync(); - long size = shard.getEngine().getTranslog().sizeInBytes(); - logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration()); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES)) - .build()).get(); - client().prepareDelete("test", "test", "2").get(); - logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration()); - assertBusy(() -> { // this is async - logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration()); - assertFalse(shard.shouldFlush()); - }); - assertEquals(0, shard.getEngine().getTranslog().totalOperations()); - } - - public void testStressMaybeFlush() throws Exception { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); - assertFalse(shard.shouldFlush()); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); - client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE).get(); - assertFalse(shard.shouldFlush()); - final AtomicBoolean running = new AtomicBoolean(true); - final int numThreads = randomIntBetween(2, 4); - Thread[] threads = new Thread[numThreads]; - CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread() { - @Override - public void run() { - try { - barrier.await(); - } catch (InterruptedException | BrokenBarrierException e) { - throw new RuntimeException(e); - } - while (running.get()) { - shard.maybeFlush(); - } - } - }; - threads[i].start(); - } - barrier.await(); - FlushStats flushStats = shard.flushStats(); - long total = flushStats.getTotal(); - client().prepareIndex("test", "test", "1").setSource("{}").get(); - assertBusy(() -> { - assertEquals(total + 1, shard.flushStats().getTotal()); - }); - running.set(false); - for (int i = 0; i < threads.length; i++) { - threads[i].join(); - } - assertEquals(total + 1, shard.flushStats().getTotal()); + closeShards(shard); } public void testLockingBeforeAndAfterRelocated() throws Exception { - assertAcked(client().admin().indices().prepareCreate("test").setSettings( - Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0) - ).get()); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); - assertBusy(() -> assertThat(shard.state(), equalTo(IndexShardState.STARTED))); + final IndexShard shard = newStartedShard(true); CountDownLatch latch = new CountDownLatch(1); Thread recoveryThread = new Thread(() -> { latch.countDown(); @@ -1041,17 +684,12 @@ public class IndexShardTests extends ESSingleNodeTestCase { // lock can again be acquired assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); } + + closeShards(shard); } public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { - assertAcked(client().admin().indices().prepareCreate("test").setSettings( - Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0) - ).get()); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); - assertBusy(() -> assertThat(shard.state(), equalTo(IndexShardState.STARTED))); + final IndexShard shard = newStartedShard(true); Thread recoveryThread = new Thread(() -> { try { shard.relocated("simulated recovery"); @@ -1079,16 +717,12 @@ public class IndexShardTests extends ESSingleNodeTestCase { } recoveryThread.join(); + + closeShards(shard); } public void testStressRelocated() throws Exception { - assertAcked(client().admin().indices().prepareCreate("test").setSettings( - Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0) - ).get()); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); + final IndexShard shard = newStartedShard(true); final int numThreads = randomIntBetween(2, 4); Thread[] indexThreads = new Thread[numThreads]; CountDownLatch allPrimaryOperationLocksAcquired = new CountDownLatch(numThreads); @@ -1136,85 +770,69 @@ public class IndexShardTests extends ESSingleNodeTestCase { for (Thread indexThread : indexThreads) 
{ indexThread.join(); } + + closeShards(shard); } public void testRecoverFromStore() throws IOException { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); + final IndexShard shard = newStartedShard(true); int translogOps = 1; - client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); + indexDoc(shard, "test", "0"); if (randomBoolean()) { - client().admin().indices().prepareFlush().get(); + flushShard(shard); translogOps = 0; } - ShardRouting routing = shard.routingEntry(); - test.removeShard(0, "b/c simon says so"); - routing = ShardRoutingHelper.reinitPrimary(routing); - IndexShard newShard = test.createShard(routing); - newShard.updateRoutingEntry(routing); + IndexShard newShard = reinitShard(shard); DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); - newShard.markAsRecovering("store", new RecoveryState(routing, localNode, null)); + newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue(newShard.recoverFromStore()); assertEquals(translogOps, newShard.recoveryState().getTranslog().recoveredOperations()); assertEquals(translogOps, newShard.recoveryState().getTranslog().totalOperations()); assertEquals(translogOps, newShard.recoveryState().getTranslog().totalOperationsOnStart()); assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f); - newShard.updateRoutingEntry(routing.moveToStarted()); - SearchResponse response = client().prepareSearch().get(); - assertHitCount(response, 1); + newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); + assertDocCount(newShard, 1); + closeShards(newShard); }
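// For reference, a minimal sketch of the store-recovery round trip the rewritten
// testRecoverFromStore above exercises, using only helpers this patch introduces in
// IndexShardTestCase (newStartedShard, indexDoc, reinitShard, assertDocCount,
// closeShards); the method name is hypothetical and the block is illustrative,
// not part of the diff:
public void storeRecoveryRoundTripSketch() throws IOException {
    final IndexShard shard = newStartedShard(true);      // fresh primary with its own store
    indexDoc(shard, "test", "0");                        // leaves one operation in the translog
    final IndexShard newShard = reinitShard(shard);      // same store, routing reset to INITIALIZING
    DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(),
        emptyMap(), emptySet(), Version.CURRENT);
    newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
    assertTrue(newShard.recoverFromStore());             // replays the translog from disk
    newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted());
    assertDocCount(newShard, 1);                         // the indexed document survived recovery
    closeShards(newShard);                               // tests now own the shard lifecycle
}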
public void testRecoverFromCleanStore() throws IOException { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); - client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); + final IndexShard shard = newStartedShard(true); + indexDoc(shard, "test", "0"); if (randomBoolean()) { - client().admin().indices().prepareFlush().get(); + flushShard(shard); } - ShardRouting routing = shard.routingEntry(); - test.removeShard(0, "b/c simon says so"); - routing = ShardRoutingHelper.reinitPrimary(routing, UnassignedInfo.Reason.INDEX_CREATED, StoreRecoverySource.EMPTY_STORE_INSTANCE); - IndexShard newShard = test.createShard(routing); - newShard.updateRoutingEntry(routing); + final ShardRouting shardRouting = shard.routingEntry(); + IndexShard newShard = reinitShard(shard, + ShardRoutingHelper.initWithSameId(shardRouting, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE) + ); + DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); - newShard.markAsRecovering("store", new RecoveryState(routing, localNode, null)); + newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue(newShard.recoverFromStore()); assertEquals(0, newShard.recoveryState().getTranslog().recoveredOperations()); assertEquals(0, newShard.recoveryState().getTranslog().totalOperations()); assertEquals(0, newShard.recoveryState().getTranslog().totalOperationsOnStart()); assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f); - newShard.updateRoutingEntry(getInitializingShardRouting(routing).moveToStarted()); - SearchResponse response = client().prepareSearch().get(); - assertHitCount(response, 0); + newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); + assertDocCount(newShard, 0); + closeShards(newShard); }
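// A compact sketch of the clean-store variant above: initWithSameId keeps the allocation
// id but swaps in an EMPTY_STORE recovery source, so recovery deliberately ignores the
// data already on disk. Illustrative only; the method name is hypothetical and it assumes
// recoveryShardFromStore performs the markAsRecovering/recoverFromStore/moveToStarted
// sequence spelled out in the test above:
public void emptyStoreRecoverySketch() throws IOException {
    IndexShard shard = newStartedShard(true);
    indexDoc(shard, "test", "0");                        // will be discarded by the empty-store recovery
    IndexShard newShard = reinitShard(shard,
        ShardRoutingHelper.initWithSameId(shard.routingEntry(),
            RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE));
    recoveryShardFromStore(newShard);
    assertDocCount(newShard, 0);                         // the shard came back empty, as requested
    closeShards(newShard);
}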
public void testFailIfIndexNotPresentInRecoverFromStore() throws Exception { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); - IndexService test = indicesService.indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); - - client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); + final IndexShard shard = newStartedShard(true); + indexDoc(shard, "test", "0"); if (randomBoolean()) { - client().admin().indices().prepareFlush().get(); + flushShard(shard); } - final ShardRouting origRouting = shard.routingEntry(); - ShardRouting routing = origRouting; + Store store = shard.store(); store.incRef(); - test.removeShard(0, "b/c simon says so"); + closeShards(shard); cleanLuceneIndex(store.directory()); store.decRef(); - routing = ShardRoutingHelper.reinitPrimary(routing); - IndexShard newShard = test.createShard(routing); + IndexShard newShard = reinitShard(shard); + DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + ShardRouting routing = newShard.routingEntry(); newShard.markAsRecovering("store", new RecoveryState(routing, localNode, null)); try { newShard.recoverFromStore(); @@ -1224,7 +842,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { } routing = ShardRoutingHelper.moveToUnassigned(routing, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "because I say so")); - routing = ShardRoutingHelper.initialize(routing, origRouting.currentNodeId()); + routing = ShardRoutingHelper.initialize(routing, newShard.routingEntry().currentNodeId()); assertTrue("it's already recovering, we should ignore new ones", newShard.ignoreRecoveryAttempt()); try { newShard.markAsRecovering("store", new RecoveryState(routing, localNode, null)); @@ -1232,30 +850,25 @@ public class IndexShardTests extends ESSingleNodeTestCase { } catch (IllegalIndexShardStateException e) { // OK! } - test.removeShard(0, "I broken it"); - routing = routing.updateUnassigned(routing.unassignedInfo(), StoreRecoverySource.EMPTY_STORE_INSTANCE); - newShard = test.createShard(routing); - newShard.markAsRecovering("store", new RecoveryState(routing, localNode, null)); + + newShard = reinitShard(newShard, + ShardRoutingHelper.initWithSameId(routing, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE)); + newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue("recover even if there is nothing to recover", newShard.recoverFromStore()); - newShard.updateRoutingEntry(getInitializingShardRouting(routing).moveToStarted()); - SearchResponse response = client().prepareSearch().get(); - assertHitCount(response, 0); + newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); + assertDocCount(newShard, 0); // we can't issue this request through a client because of the inconsistencies we created with the cluster state // doing it directly instead - IndexRequest request = client().prepareIndex("test", "test", "0").setSource("{}").request(); - request.process(null, false, "test"); - TransportIndexAction.executeIndexRequestOnPrimary(request, newShard, null); + indexDoc(newShard, "test", "0"); newShard.refresh("test"); - assertHitCount(client().prepareSearch().get(), 1); + assertDocCount(newShard, 1); + + closeShards(newShard); } public void testRecoveryFailsAfterMovingToRelocatedState() throws InterruptedException, IOException { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); + final IndexShard shard = newStartedShard(true); ShardRouting origRouting = shard.routingEntry(); assertThat(shard.state(), equalTo(IndexShardState.STARTED)); ShardRouting 
inRecoveryRouting = ShardRoutingHelper.relocate(origRouting, "some_node"); @@ -1267,37 +880,37 @@ public class IndexShardTests extends ESSingleNodeTestCase { fail("Expected IndexShardRelocatedException"); } catch (IndexShardRelocatedException expected) { } + + closeShards(shard); } public void testRestoreShard() throws IOException { - createIndex("test"); - createIndex("test_target"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("test")); - IndexService test_target = indicesService.indexService(resolveIndex("test_target")); - final IndexShard test_shard = test.getShardOrNull(0); + final IndexShard source = newStartedShard(true); + IndexShard target = newStartedShard(true); - client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); - client().prepareIndex("test_target", "test", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get(); - assertHitCount(client().prepareSearch("test_target").get(), 1); - assertSearchHits(client().prepareSearch("test_target").get(), "1"); - client().admin().indices().prepareFlush("test").get(); // only flush test - final ShardRouting origRouting = test_target.getShardOrNull(0).routingEntry(); + indexDoc(source, "test", "0"); + if (randomBoolean()) { + source.refresh("test"); + } + indexDoc(target, "test", "1"); + target.refresh("test"); + assertDocs(target, new Uid("test", "1")); + flushShard(source); // only flush source + final ShardRouting origRouting = target.routingEntry(); ShardRouting routing = ShardRoutingHelper.reinitPrimary(origRouting); final Snapshot snapshot = new Snapshot("foo", new SnapshotId("bar", UUIDs.randomBase64UUID())); - routing = ShardRoutingHelper.newWithRestoreSource(routing, new SnapshotRecoverySource(snapshot, Version.CURRENT, "test")); - test_target.removeShard(0, "just do it man!"); - final IndexShard test_target_shard = test_target.createShard(routing); - Store sourceStore = test_shard.store(); - Store targetStore = test_target_shard.store(); + routing = ShardRoutingHelper.newWithRestoreSource(routing, + new RecoverySource.SnapshotRecoverySource(snapshot, Version.CURRENT, "test")); + target = reinitShard(target, routing); + Store sourceStore = source.store(); + Store targetStore = target.store(); - test_target_shard.updateRoutingEntry(routing); DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); - test_target_shard.markAsRecovering("store", new RecoveryState(routing, localNode, null)); - assertTrue(test_target_shard.restoreFromRepository(new RestoreOnlyRepository("test") { + target.markAsRecovering("store", new RecoveryState(routing, localNode, null)); + assertTrue(target.restoreFromRepository(new RestoreOnlyRepository("test") { @Override - public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, + RecoveryState recoveryState) { try { cleanLuceneIndex(targetStore.directory()); for (String file : sourceStore.directory().listAll()) { @@ -1312,19 +925,17 @@ public class IndexShardTests extends ESSingleNodeTestCase { } })); - test_target_shard.updateRoutingEntry(routing.moveToStarted()); - assertHitCount(client().prepareSearch("test_target").get(), 1); - 
assertSearchHits(client().prepareSearch("test_target").get(), "0"); + target.updateRoutingEntry(routing.moveToStarted()); + assertDocs(target, new Uid("test", "0")); + + closeShards(source, target); } public void testSearcherWrapperIsUsed() throws IOException { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService(resolveIndex("test")); - IndexShard shard = indexService.getShardOrNull(0); - client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get(); + IndexShard shard = newStartedShard(true); + indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); + indexDoc(shard, "test", "1", "{\"foobar\" : \"bar\"}"); + shard.refresh("test"); Engine.GetResult getResult = shard.get(new Engine.Get(false, new Term(UidFieldMapper.NAME, Uid.createUid("test", "1")))); assertTrue(getResult.exists()); @@ -1347,35 +958,28 @@ public class IndexShardTests extends ESSingleNodeTestCase { return searcher; } }; - shard.close("simon says", true); - IndexShard newShard = reinitWithWrapper(indexService, shard, wrapper); - try { - try (Engine.Searcher searcher = newShard.acquireSearcher("test")) { - TopDocs search = searcher.searcher().search(new TermQuery(new Term("foo", "bar")), 10); - assertEquals(search.totalHits, 0); - search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10); - assertEquals(search.totalHits, 1); - } - getResult = newShard.get(new Engine.Get(false, new Term(UidFieldMapper.NAME, Uid.createUid("test", "1")))); - assertTrue(getResult.exists()); - assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader - assertTrue(getResult.searcher().reader() instanceof FieldMaskingReader); - getResult.release(); - } finally { - newShard.close("just do it", randomBoolean()); + closeShards(shard); + IndexShard newShard = newShard(ShardRoutingHelper.reinitPrimary(shard.routingEntry()), + shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper); + + recoveryShardFromStore(newShard); + + try (Engine.Searcher searcher = newShard.acquireSearcher("test")) { + TopDocs search = searcher.searcher().search(new TermQuery(new Term("foo", "bar")), 10); + assertEquals(search.totalHits, 0); + search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10); + assertEquals(search.totalHits, 1); } + getResult = newShard.get(new Engine.Get(false, new Term(UidFieldMapper.NAME, Uid.createUid("test", "1")))); + assertTrue(getResult.exists()); + assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader + assertTrue(getResult.searcher().reader() instanceof FieldMaskingReader); + getResult.release(); + + closeShards(newShard); } - public void testSearcherWrapperWorksWithGlobaOrdinals() throws IOException { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService(resolveIndex("test")); - IndexShard shard = indexService.getShardOrNull(0); - client().admin().indices().preparePutMapping("test").setType("test").setSource("foo", "type=text,fielddata=true").get(); - client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get(); - client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : 
\"bar\"}").setRefreshPolicy(IMMEDIATE).get(); - + public void testSearcherWrapperWorksWithGlobalOrdinals() throws IOException { IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { @Override public DirectoryReader wrap(DirectoryReader reader) throws IOException { @@ -1388,46 +992,53 @@ public class IndexShardTests extends ESSingleNodeTestCase { } }; - shard.close("simon says", true); - IndexShard newShard = reinitWithWrapper(indexService, shard, wrapper); - try { - // test global ordinals are evicted - MappedFieldType foo = newShard.mapperService().fullName("foo"); - IndexFieldData.Global ifd = shard.indexFieldDataService().getForField(foo); - FieldDataStats before = shard.fieldData().stats("foo"); - assertThat(before.getMemorySizeInBytes(), equalTo(0L)); - FieldDataStats after = null; - try (Engine.Searcher searcher = newShard.acquireSearcher("test")) { - assumeTrue("we have to have more than one segment", searcher.getDirectoryReader().leaves().size() > 1); - ifd.loadGlobal(searcher.getDirectoryReader()); - after = shard.fieldData().stats("foo"); - assertEquals(after.getEvictions(), before.getEvictions()); - // If a field doesn't exist an empty IndexFieldData is returned and that isn't cached: - assertThat(after.getMemorySizeInBytes(), equalTo(0L)); - } - assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions()); - assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), after.getMemorySizeInBytes()); - newShard.flush(new FlushRequest().force(true).waitIfOngoing(true)); - newShard.refresh("test"); - assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), before.getMemorySizeInBytes()); - assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions()); - } finally { - newShard.close("just do it", randomBoolean()); + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData metaData = IndexMetaData.builder("test") + .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\", \"fielddata\": true }}}") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, wrapper); + recoveryShardFromStore(shard); + indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); + shard.refresh("created segment 1"); + indexDoc(shard, "test", "1", "{\"foobar\" : \"bar\"}"); + shard.refresh("created segment 2"); + + // test global ordinals are evicted + MappedFieldType foo = shard.mapperService().fullName("foo"); + IndexFieldData.Global ifd = shard.indexFieldDataService().getForField(foo); + FieldDataStats before = shard.fieldData().stats("foo"); + assertThat(before.getMemorySizeInBytes(), equalTo(0L)); + FieldDataStats after = null; + try (Engine.Searcher searcher = shard.acquireSearcher("test")) { + assumeTrue("we have to have more than one segment", searcher.getDirectoryReader().leaves().size() > 1); + ifd.loadGlobal(searcher.getDirectoryReader()); + after = shard.fieldData().stats("foo"); + assertEquals(after.getEvictions(), before.getEvictions()); + // If a field doesn't exist an empty IndexFieldData is returned and that isn't cached: + assertThat(after.getMemorySizeInBytes(), equalTo(0L)); } + assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions()); + assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), after.getMemorySizeInBytes()); + 
shard.flush(new FlushRequest().force(true).waitIfOngoing(true)); + shard.refresh("test"); + assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), before.getMemorySizeInBytes()); + assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions()); + + closeShards(shard); } - public void testIndexingOperationListnenersIsInvokedOnRecovery() throws IOException { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService(resolveIndex("test")); - IndexShard shard = indexService.getShardOrNull(0); - client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").get(); - client().prepareDelete("test", "test", "0").get(); - client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get(); - IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {}; - shard.close("simon says", false); + public void testIndexingOperationListenersIsInvokedOnRecovery() throws IOException { + IndexShard shard = newStartedShard(true); + indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); + deleteDoc(shard, "test", "0"); + indexDoc(shard, "test", "1", "{\"foo\" : \"bar\"}"); + shard.refresh("test"); + final AtomicInteger preIndex = new AtomicInteger(); final AtomicInteger postIndex = new AtomicInteger(); final AtomicInteger preDelete = new AtomicInteger(); @@ -1456,85 +1067,28 @@ public class IndexShardTests extends ESSingleNodeTestCase { } }; - final IndexShard newShard = reinitWithWrapper(indexService, shard, wrapper, listener); - try { - IndexingStats indexingStats = newShard.indexingStats(); - // ensure we are not influencing the indexing stats - assertEquals(0, indexingStats.getTotal().getDeleteCount()); - assertEquals(0, indexingStats.getTotal().getDeleteCurrent()); - assertEquals(0, indexingStats.getTotal().getIndexCount()); - assertEquals(0, indexingStats.getTotal().getIndexCurrent()); - assertEquals(0, indexingStats.getTotal().getIndexFailedCount()); - assertEquals(2, preIndex.get()); - assertEquals(2, postIndex.get()); - assertEquals(1, preDelete.get()); - assertEquals(1, postDelete.get()); - } finally { - newShard.close("just do it", randomBoolean()); - } + final IndexShard newShard = reinitShard(shard, listener); + recoveryShardFromStore(newShard); + IndexingStats indexingStats = newShard.indexingStats(); + // ensure we are not influencing the indexing stats + assertEquals(0, indexingStats.getTotal().getDeleteCount()); + assertEquals(0, indexingStats.getTotal().getDeleteCurrent()); + assertEquals(0, indexingStats.getTotal().getIndexCount()); + assertEquals(0, indexingStats.getTotal().getIndexCurrent()); + assertEquals(0, indexingStats.getTotal().getIndexFailedCount()); + assertEquals(2, preIndex.get()); + assertEquals(2, postIndex.get()); + assertEquals(1, preDelete.get()); + assertEquals(1, postDelete.get()); + + closeShards(newShard); } - public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService(resolveIndex("test")); - IndexShard shard = indexService.getShardOrNull(0); - client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").get(); - client().prepareDelete("test", "test", "0").get(); - client().prepareIndex("test", "test", "1").setSource("{\"foo\" : 
\"bar\"}").setRefreshPolicy(IMMEDIATE).get(); - - IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {}; - shard.close("simon says", false); - AtomicReference shardRef = new AtomicReference<>(); - List failures = new ArrayList<>(); - IndexingOperationListener listener = new IndexingOperationListener() { - - @Override - public void postIndex(Engine.Index index, boolean created) { - try { - assertNotNull(shardRef.get()); - // this is all IMC needs to do - check current memory and refresh - assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0); - shardRef.get().refresh("test"); - } catch (Exception e) { - failures.add(e); - throw e; - } - } - - - @Override - public void postDelete(Engine.Delete delete) { - try { - assertNotNull(shardRef.get()); - // this is all IMC needs to do - check current memory and refresh - assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0); - shardRef.get().refresh("test"); - } catch (Exception e) { - failures.add(e); - throw e; - } - } - }; - final IndexShard newShard = newIndexShard(indexService, shard, wrapper, listener); - shardRef.set(newShard); - recoverShard(newShard); - - try { - ExceptionsHelper.rethrowAndSuppress(failures); - } finally { - newShard.close("just do it", randomBoolean()); - } - } public void testSearchIsReleaseIfWrapperFails() throws IOException { - createIndex("test"); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService(resolveIndex("test")); - IndexShard shard = indexService.getShardOrNull(0); - client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get(); + IndexShard shard = newStartedShard(true); + indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); + shard.refresh("test"); IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { @Override public DirectoryReader wrap(DirectoryReader reader) throws IOException { @@ -1546,180 +1100,147 @@ public class IndexShardTests extends ESSingleNodeTestCase { } }; - shard.close("simon says", true); - IndexShard newShard = reinitWithWrapper(indexService, shard, wrapper); + closeShards(shard); + IndexShard newShard = newShard(ShardRoutingHelper.reinitPrimary(shard.routingEntry()), + shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper); + + recoveryShardFromStore(newShard); + try { newShard.acquireSearcher("test"); fail("exception expected"); } catch (RuntimeException ex) { // - } finally { - newShard.close("just do it", randomBoolean()); } + closeShards(newShard); } - public static final IndexShard reinitWithWrapper(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper, IndexingOperationListener... listeners) throws IOException { - IndexShard newShard = newIndexShard(indexService, shard, wrapper, listeners); - return recoverShard(newShard); - } - - public static final IndexShard recoverShard(IndexShard newShard) throws IOException { - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); - newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); - assertTrue(newShard.recoverFromStore()); - newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); - return newShard; - } - - public static final IndexShard newIndexShard(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper, IndexingOperationListener... 
- public static final IndexShard reinitWithWrapper(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper, IndexingOperationListener... listeners) throws IOException { - IndexShard newShard = newIndexShard(indexService, shard, wrapper, listeners); - return recoverShard(newShard); - } - - public static final IndexShard recoverShard(IndexShard newShard) throws IOException { - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); - newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); - assertTrue(newShard.recoverFromStore()); - newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); - return newShard; - } - - public static final IndexShard newIndexShard(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper, IndexingOperationListener... listeners) throws IOException { - ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry()); - IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(), - shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), - indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, - indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners)); - return newShard; - } - - private static ShardRouting getInitializingShardRouting(ShardRouting existingShardRouting) { - ShardRouting shardRouting = TestShardRouting.newShardRouting(existingShardRouting.shardId(), - existingShardRouting.currentNodeId(), null, existingShardRouting.primary(), ShardRoutingState.INITIALIZING, - existingShardRouting.allocationId()); - shardRouting = shardRouting.updateUnassigned(new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "fake recovery"), - StoreRecoverySource.EXISTING_STORE_INSTANCE); - return shardRouting; - } public void testTranslogRecoverySyncsTranslog() throws IOException { - createIndex("testindexfortranslogsync"); - client().admin().indices().preparePutMapping("testindexfortranslogsync").setType("testtype").setSource(jsonBuilder().startObject() - .startObject("testtype") - .startObject("properties") - .startObject("foo") - .field("type", "text") - .endObject() - .endObject().endObject().endObject()).get(); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("testindexfortranslogsync")); - IndexShard shard = test.getShardOrNull(0); - ShardRouting routing = getInitializingShardRouting(shard.routingEntry()); - test.removeShard(0, "b/c britta says so"); - IndexShard newShard = test.createShard(routing); - DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); - newShard.markAsRecovering("for testing", new RecoveryState(routing, localNode, null)); - List<Translog.Operation> operations = new ArrayList<>(); - operations.add(new Translog.Index("testtype", "1", BytesReference.toBytes(jsonBuilder().startObject().field("foo", "bar").endObject().bytes()))); - newShard.prepareForIndexRecovery(); - newShard.recoveryState().getTranslog().totalOperations(operations.size()); - newShard.skipTranslogRecovery(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); - newShard.performBatchRecovery(operations); - assertFalse(newShard.getTranslog().syncNeeded()); + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData metaData = IndexMetaData.builder("test") + .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoveryShardFromStore(primary); + + indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); + IndexShard replica = newShard(primary.shardId(), false, "n2", metaData, null); + recoverReplica(replica, primary, (shard, discoveryNode) -> + new RecoveryTarget(shard, discoveryNode, recoveryListener, aLong -> { + }) { + @Override + public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) { 
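// illustrative note, not part of the diff: this hook runs on the recovery target while
// the primary replays translog operations to the replica; delegating to super indexes
// the batch and leaves the replica's translog fsynced, which the assertion below checks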
super.indexTranslogOperations(operations, totalTranslogOps); + assertFalse(replica.getTranslog().syncNeeded()); + } + }, true); + + closeShards(primary, replica); } - public void testIndexingBufferDuringInternalRecovery() throws IOException { - createIndex("index"); - client().admin().indices().preparePutMapping("index").setType("testtype").setSource(jsonBuilder().startObject() - .startObject("testtype") - .startObject("properties") - .startObject("foo") - .field("type", "text") - .endObject() - .endObject().endObject().endObject()).get(); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("index")); - IndexShard shard = test.getShardOrNull(0); - ShardRouting routing = getInitializingShardRouting(shard.routingEntry()); - test.removeShard(0, "b/c britta says so"); - IndexShard newShard = test.createShard(routing); - newShard.shardRouting = routing; + public void testShardActiveDuringInternalRecovery() throws IOException { + IndexShard shard = newStartedShard(true); + indexDoc(shard, "type", "0"); + shard = reinitShard(shard); DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); - newShard.markAsRecovering("for testing", new RecoveryState(routing, localNode, null)); + shard.markAsRecovering("for testing", new RecoveryState(shard.routingEntry(), localNode, null)); // Shard is still inactive since we haven't started recovering yet - assertFalse(newShard.isActive()); - newShard.prepareForIndexRecovery(); + assertFalse(shard.isActive()); + shard.prepareForIndexRecovery(); // Shard is still inactive since we haven't started recovering yet - assertFalse(newShard.isActive()); - newShard.performTranslogRecovery(true); + assertFalse(shard.isActive()); + shard.performTranslogRecovery(true); // Shard should now be active since we did recover: - assertTrue(newShard.isActive()); + assertTrue(shard.isActive()); + closeShards(shard); } - public void testIndexingBufferDuringPeerRecovery() throws IOException { - createIndex("index"); - client().admin().indices().preparePutMapping("index").setType("testtype").setSource(jsonBuilder().startObject() - .startObject("testtype") - .startObject("properties") - .startObject("foo") - .field("type", "text") - .endObject() - .endObject().endObject().endObject()).get(); - ensureGreen(); - IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService(resolveIndex("index")); - IndexShard shard = test.getShardOrNull(0); - ShardRouting routing = getInitializingShardRouting(shard.routingEntry()); - test.removeShard(0, "b/c britta says so"); - IndexShard newShard = test.createShard(routing); + public void testShardActiveDuringPeerRecovery() throws IOException { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData metaData = IndexMetaData.builder("test") + .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoveryShardFromStore(primary); + + indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); + IndexShard replica = newShard(primary.shardId(), false, "n2", metaData, null); 
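// illustrative note, not part of the diff: newShard(shardId, false, "n2", metaData, null)
// builds an unstarted replica over an empty store; it must report isActive() == false
// until peer recovery actually replays operations, which the overrides below assert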
DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); - newShard.markAsRecovering("for testing", new RecoveryState(routing, localNode, null)); + replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); // Shard is still inactive since we haven't started recovering yet - assertFalse(newShard.isActive()); - List<Translog.Operation> operations = new ArrayList<>(); - operations.add(new Translog.Index("testtype", "1", BytesReference.toBytes(jsonBuilder().startObject().field("foo", "bar").endObject().bytes()))); - newShard.prepareForIndexRecovery(); - newShard.skipTranslogRecovery(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); - // Shard is still inactive since we haven't started recovering yet - assertFalse(newShard.isActive()); - newShard.performBatchRecovery(operations); - // Shard should now be active since we did recover: - assertTrue(newShard.isActive()); + assertFalse(replica.isActive()); + recoverReplica(replica, primary, (shard, discoveryNode) -> + new RecoveryTarget(shard, discoveryNode, recoveryListener, aLong -> { + }) { + @Override + public void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException { + super.prepareForTranslogOperations(totalTranslogOps, maxUnsafeAutoIdTimestamp); + // Shard is still inactive since we haven't started recovering yet + assertFalse(replica.isActive()); + + } + + @Override + public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) { + super.indexTranslogOperations(operations, totalTranslogOps); + // Shard should now be active since we did recover: + assertTrue(replica.isActive()); + } + }, false); + + closeShards(primary, replica); }
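// The two recovery tests above share one pattern: wrap RecoveryTarget, override a
// recovery callback, and assert on the replica mid-recovery. A minimal reusable sketch
// (the method name is hypothetical; recoveryListener and the other helpers are the
// IndexShardTestCase members used above):
public void replicaRecoveryHookSketch() throws IOException {
    IndexShard primary = newStartedShard(true);
    indexDoc(primary, "test", "0");
    IndexShard replica = newShard(primary.shardId(), false, "n2",
        primary.indexSettings().getIndexMetaData(), null);
    recoverReplica(replica, primary, (shard, discoveryNode) ->
        new RecoveryTarget(shard, discoveryNode, recoveryListener, aLong -> {
        }) {
            @Override
            public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps) {
                super.indexTranslogOperations(operations, totalTranslogOps);
                // replica-side assertions go here (translog state, isActive(), ...)
            }
        }, true);
    closeShards(primary, replica);
}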
"index_1", 0), "n1", true, + ShardRoutingState.INITIALIZING, RecoverySource.LocalShardsRecoverySource.INSTANCE); + + final IndexShard targetShard; DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); + Map requestedMappingUpdates = ConcurrentCollections.newConcurrentMap(); { - final IndexShard newShard = test.createShard(routing); - newShard.markAsRecovering("store", new RecoveryState(routing, localNode, null)); + targetShard = newShard(targetRouting); + targetShard.markAsRecovering("store", new RecoveryState(targetShard.routingEntry(), localNode, null)); BiConsumer mappingConsumer = (type, mapping) -> { - try { - client().admin().indices().preparePutMapping().setConcreteIndex(newShard.indexSettings().getIndex()) - .setType(type) - .setSource(mapping.source().string()) - .get(); - } catch (IOException ex) { - throw new ElasticsearchException("failed to stringify mapping source", ex); - } + assertNull(requestedMappingUpdates.put(type, mapping)); }; - expectThrows(IllegalArgumentException.class, () -> { - IndexService index = indicesService.indexService(resolveIndex("index")); - IndexService index_2 = indicesService.indexService(resolveIndex("index_2")); - newShard.recoverFromLocalShards(mappingConsumer, Arrays.asList(index.getShard(0), index_2.getShard(0))); - }); - IndexService indexService = indicesService.indexService(resolveIndex("index")); - assertTrue(newShard.recoverFromLocalShards(mappingConsumer, Arrays.asList(indexService.getShard(0)))); - RecoveryState recoveryState = newShard.recoveryState(); + final IndexShard differentIndex = newShard(new ShardId("index_2", "index_2", 0), true); + recoveryShardFromStore(differentIndex); + expectThrows(IllegalArgumentException.class, () -> { + targetShard.recoverFromLocalShards(mappingConsumer, Arrays.asList(sourceShard, differentIndex)); + }); + closeShards(differentIndex); + + assertTrue(targetShard.recoverFromLocalShards(mappingConsumer, Arrays.asList(sourceShard))); + RecoveryState recoveryState = targetShard.recoveryState(); assertEquals(RecoveryState.Stage.DONE, recoveryState.getStage()); assertTrue(recoveryState.getIndex().fileDetails().size() > 0); for (RecoveryState.File file : recoveryState.getIndex().fileDetails()) { @@ -1729,96 +1250,115 @@ public class IndexShardTests extends ESSingleNodeTestCase { assertEquals(file.recovered(), file.length()); } } - routing = ShardRoutingHelper.moveToStarted(routing); - newShard.updateRoutingEntry(routing); - assertHitCount(client().prepareSearch("index_1").get(), 2); + targetShard.updateRoutingEntry(ShardRoutingHelper.moveToStarted(targetShard.routingEntry())); + assertDocCount(targetShard, 2); } // now check that it's persistent ie. 
// now check that it's persistent, i.e. that the added shards are committed { - routing = shard.routingEntry(); - test.removeShard(0, "b/c simon says so"); - routing = ShardRoutingHelper.reinitPrimary(routing); - final IndexShard newShard = test.createShard(routing); - newShard.markAsRecovering("store", new RecoveryState(routing, localNode, null)); - assertTrue(newShard.recoverFromStore()); - routing = ShardRoutingHelper.moveToStarted(routing); - newShard.updateRoutingEntry(routing); - assertHitCount(client().prepareSearch("index_1").get(), 2); + final IndexShard newShard = reinitShard(targetShard); + recoveryShardFromStore(newShard); + assertDocCount(newShard, 2); + closeShards(newShard); } - GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings("index_1").get(); - ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = mappingsResponse.getMappings(); - assertNotNull(mappings.get("index_1")); - assertNotNull(mappings.get("index_1").get("test")); - assertEquals(mappings.get("index_1").get("test").get().source().string(), "{\"test\":{\"properties\":{\"foo\":{\"type\":\"text\"}}}}"); + assertThat(requestedMappingUpdates, hasKey("test")); + assertThat(requestedMappingUpdates.get("test").get().source().string(), equalTo("{\"properties\":{\"foo\":{\"type\":\"text\"}}}")); + closeShards(sourceShard, targetShard); } /** A dummy repository for testing which just needs restore overridden */ private abstract static class RestoreOnlyRepository extends AbstractLifecycleComponent implements Repository { private final String indexName; + public RestoreOnlyRepository(String indexName) { super(Settings.EMPTY); this.indexName = indexName; } + @Override - protected void doStart() {} + protected void doStart() { + } + @Override - protected void doStop() {} + protected void doStop() { + } + @Override - protected void doClose() {} + protected void doClose() { + } + @Override public RepositoryMetaData getMetadata() { return null; } + @Override public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) { return null; } + @Override public MetaData getSnapshotMetaData(SnapshotInfo snapshot, List<IndexId> indices) throws IOException { return null; } + @Override public RepositoryData getRepositoryData() { Map<IndexId, Set<SnapshotId>> map = new HashMap<>(); - map.put(new IndexId(indexName, "blah"), Collections.emptySet()); + map.put(new IndexId(indexName, "blah"), emptySet()); return new RepositoryData(Collections.emptyList(), map); } + @Override - public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, MetaData metaData) {} + public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, MetaData metaData) { + } + @Override public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List<IndexId> indices, long startTime, String failure, int totalShards, List<SnapshotShardFailure> shardFailures) { return null; } + @Override - public void deleteSnapshot(SnapshotId snapshotId) {} + public void deleteSnapshot(SnapshotId snapshotId) { + } + @Override public long getSnapshotThrottleTimeInNanos() { return 0; } + @Override public long getRestoreThrottleTimeInNanos() { return 0; } + @Override public String startVerification() { return null; } + @Override - public void endVerification(String verificationToken) {} + public void endVerification(String verificationToken) { + } + @Override public boolean isReadOnly() { return false; } + @Override - public void snapshotShard(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexCommit 
snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + } + @Override public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) { return null; } + @Override - public void verify(String verificationToken, DiscoveryNode localNode) {} + public void verify(String verificationToken, DiscoveryNode localNode) { + } } public static Engine getEngineFromShard(IndexShard shard) { diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java index e960622d1c1..34c1789824e 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -64,4 +65,7 @@ public class ShardUtilsTests extends ESTestCase { IOUtils.close(writer, dir); } + public static Engine getShardEngine(IndexShard shard) { + return shard.getEngine(); + } } diff --git a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 858e046d67c..2d0e4a3aeb9 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.indices; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; @@ -31,7 +30,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardTests; +import org.elasticsearch.index.shard.IndexShardIT; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -443,7 +442,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { shard.writeIndexingBuffer(); } }; - final IndexShard newShard = IndexShardTests.newIndexShard(indexService, shard, wrapper, imc); + final IndexShard newShard = IndexShardIT.newIndexShard(indexService, shard, wrapper, imc); shardRef.set(newShard); try { assertEquals(0, imc.availableShards().size()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java similarity index 100% rename from core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java rename to test/framework/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingHelper.java diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java new file mode 100644 index 00000000000..23aed676af8 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -0,0 +1,477 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.shard; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.lucene.document.Document; +import org.apache.lucene.index.IndexNotFoundException; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingHelper; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.LocalTransportAddress; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MapperTestUtils; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.cache.IndexCache; +import org.elasticsearch.index.cache.query.DisabledQueryCache; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.fielddata.IndexFieldDataCache; +import org.elasticsearch.index.fielddata.IndexFieldDataService; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.index.store.DirectoryService; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; +import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; +import 
org.elasticsearch.indices.recovery.RecoveryFailedException; +import org.elasticsearch.indices.recovery.RecoverySourceHandler; +import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.indices.recovery.StartRecoveryRequest; +import org.elasticsearch.test.DummyShardLock; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.hasSize; + +/** + * A base class for unit tests that need to create and shut down {@link IndexShard} instances easily, + * containing utilities for shard creation and recoveries. See {@link #newShard(boolean)} and + * {@link #newStartedShard()} as good starting points + */ +public abstract class IndexShardTestCase extends ESTestCase { + + protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() { + @Override + public void onRecoveryDone(RecoveryState state) { + + } + + @Override + public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { + throw new AssertionError(e); + } + }; + + protected ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getClass().getName()); + } + + @Override + public void tearDown() throws Exception { + try { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + } finally { + super.tearDown(); + } + } + + private Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException { + final ShardId shardId = shardPath.getShardId(); + final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { + @Override + public Directory newDirectory() throws IOException { + return newFSDirectory(shardPath.resolveIndex()); + } + + @Override + public long throttleTimeInNanos() { + return 0; + } + }; + return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); + } + + /** + * creates a new initializing shard. The shard will have its own unique data path. + * + * @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica + * (ready to recover from another shard) + */ + protected IndexShard newShard(boolean primary) throws IOException { + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId("index", "_na_", 0), "n1", primary, + ShardRoutingState.INITIALIZING, + primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); + return newShard(shardRouting); + } + + /** + * creates a new initializing shard. The shard will have its own unique data path. + * + * @param shardRouting the {@link ShardRouting} to use for this shard + * @param listeners an optional set of listeners to add to the shard + */ + protected IndexShard newShard(ShardRouting shardRouting, IndexingOperationListener...
listeners) throws IOException { + assert shardRouting.initializing() : shardRouting; + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData.Builder metaData = IndexMetaData.builder(shardRouting.getIndexName()) + .settings(settings) + .primaryTerm(0, 1); + return newShard(shardRouting, metaData.build(), listeners); + } + + /** + * creates a new initializing shard. The shard will have its own unique data path. + * + * @param shardId the shard id to use + * @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica + * (ready to recover from another shard) + * @param listeners an optional set of listeners to add to the shard + */ + protected IndexShard newShard(ShardId shardId, boolean primary, IndexingOperationListener... listeners) throws IOException { + ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, randomAsciiOfLength(5), primary, + ShardRoutingState.INITIALIZING, + primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); + return newShard(shardRouting, listeners); + } + + /** + * creates a new initializing shard. The shard will be put in its proper path under the + * supplied node id. + * + * @param shardId the shard id to use + * @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica + * (ready to recover from another shard) + */ + protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, IndexMetaData indexMetaData, + @Nullable IndexSearcherWrapper searcherWrapper) throws IOException { + ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, nodeId, primary, ShardRoutingState.INITIALIZING, + primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); + return newShard(shardRouting, indexMetaData, searcherWrapper); + } + + /** + * creates a new initializing shard. The shard will be put in its proper path under the + * current node id the shard is assigned to. + * + * @param routing shard routing to use + * @param indexMetaData indexMetaData for the shard, including any mapping + * @param listeners an optional set of listeners to add to the shard + */ + protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, IndexingOperationListener... listeners) + throws IOException { + return newShard(routing, indexMetaData, null, listeners); + } + + /** + * creates a new initializing shard. The shard will be put in its proper path under the + * current node id the shard is assigned to. + * + * @param routing shard routing to use + * @param indexMetaData indexMetaData for the shard, including any mapping + * @param indexSearcherWrapper an optional wrapper to be used during searches + * @param listeners an optional set of listeners to add to the shard + */ + protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, + @Nullable IndexSearcherWrapper indexSearcherWrapper, IndexingOperationListener...
listeners) + throws IOException { + // add node id as name to settings for proper logging + final ShardId shardId = routing.shardId(); + final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); + ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); + return newShard(routing, shardPath, indexMetaData, indexSearcherWrapper, listeners); + } + + /** + * creates a new initializing shard. + * + * @param routing shard routing to use + * @param shardPath path to use for shard data + * @param indexMetaData indexMetaData for the shard, including any mapping + * @param indexSearcherWrapper an optional wrapper to be used during searches + * @param listeners an optional set of listeners to add to the shard + */ + protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData, + @Nullable IndexSearcherWrapper indexSearcherWrapper, + IndexingOperationListener... listeners) throws IOException { + final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings); + final IndexShard indexShard; + final Store store = createStore(indexSettings, shardPath); + boolean success = false; + try { + IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null); + MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), indexSettings.getSettings()); + for (ObjectObjectCursor typeMapping : indexMetaData.getMappings()) { + mapperService.merge(typeMapping.key, typeMapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true); + } + SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap()); + final IndexEventListener indexEventListener = new IndexEventListener() { + }; + final Engine.Warmer warmer = searcher -> { + }; + IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() { + }); + IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, indicesFieldDataCache, + new NoneCircuitBreakerService(), mapperService); + indexShard = new IndexShard(routing, indexSettings, shardPath, store, indexCache, mapperService, similarityService, + indexFieldDataService, null, indexEventListener, indexSearcherWrapper, threadPool, BigArrays.NON_RECYCLING_INSTANCE, warmer, + Collections.emptyList(), Arrays.asList(listeners)); + success = true; + } finally { + if (success == false) { + IOUtils.close(store); + } + } + return indexShard; + } + + /** + * Takes an existing shard, closes it and starts a new initializing shard at the same location + * + * @param listeners new listeners to use for the newly created shard + */ + protected IndexShard reinitShard(IndexShard current, IndexingOperationListener... listeners) throws IOException { + final ShardRouting shardRouting = current.routingEntry(); + return reinitShard(current, ShardRoutingHelper.initWithSameId(shardRouting, + shardRouting.primary() ? RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE + ), listeners); + }
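As an editor's sketch of how these helpers compose (not part of the patch, and using only methods defined in this class), the restart-persistence pattern exercised by the IndexShardTests change above could be written as:

    // documents indexed and flushed on a started primary should survive reinitShard + store recovery
    IndexShard shard = newStartedShard(true);    // empty primary, recovered from an empty store
    indexDoc(shard, "test", "0", "{}");
    flushShard(shard);                           // commit, so the doc is on disk for the next recovery
    IndexShard restarted = reinitShard(shard);   // closes `shard` and reuses its shard path
    recoveryShardFromStore(restarted);           // recover from the existing on-disk store
    assertDocCount(restarted, 1);
    closeShards(restarted);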
+ + /** + * Takes an existing shard, closes it and starts a new initializing shard at the same location + * + * @param routing the shard routing to use for the newly created shard. + * @param listeners new listeners to use for the newly created shard + */ + protected IndexShard reinitShard(IndexShard current, ShardRouting routing, IndexingOperationListener... listeners) throws IOException { + closeShards(current); + return newShard(routing, current.shardPath(), current.indexSettings().getIndexMetaData(), null, listeners); + } + + /** + * creates a new empty shard and starts it. The shard will be either a replica or a primary. + */ + protected IndexShard newStartedShard() throws IOException { + return newStartedShard(randomBoolean()); + } + + /** + * creates a new empty shard and starts it. + * + * @param primary controls whether the shard will be a primary or a replica. + */ + protected IndexShard newStartedShard(boolean primary) throws IOException { + IndexShard shard = newShard(primary); + if (primary) { + recoveryShardFromStore(shard); + } else { + recoveryEmptyReplica(shard); + } + return shard; + } + + protected void closeShards(IndexShard... shards) throws IOException { + closeShards(Arrays.asList(shards)); + } + + protected void closeShards(Iterable shards) throws IOException { + for (IndexShard shard : shards) { + if (shard != null) { + try { + shard.close("test", false); + } finally { + IOUtils.close(shard.store()); + } + } + } + } + + protected void recoveryShardFromStore(IndexShard primary) throws IOException { + primary.markAsRecovering("store", new RecoveryState(primary.routingEntry(), + getFakeDiscoNode(primary.routingEntry().currentNodeId()), + null)); + primary.recoverFromStore(); + primary.updateRoutingEntry(ShardRoutingHelper.moveToStarted(primary.routingEntry())); + } + + protected void recoveryEmptyReplica(IndexShard replica) throws IOException { + IndexShard primary = null; + try { + primary = newStartedShard(true); + recoverReplica(replica, primary); + } finally { + closeShards(primary); + } + } + + private DiscoveryNode getFakeDiscoNode(String id) { + return new DiscoveryNode(id, new LocalTransportAddress("_fake_" + id), Version.CURRENT); + } + + /** recovers a replica from the given primary **/ + protected void recoverReplica(IndexShard replica, IndexShard primary) throws IOException { + recoverReplica(replica, primary, + (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, version -> { + }), + true); + } + + /** + * Recovers a replica from the given primary, allowing the caller to supply a custom recovery target.
+ * A typical usage of a custom recovery target is to assert things in the various stages of recovery. + * + * @param markAsRecovering set to false if you have already marked the replica as recovering + */ + protected void recoverReplica(IndexShard replica, IndexShard primary, + BiFunction targetSupplier, + boolean markAsRecovering) + throws IOException { + final DiscoveryNode pNode = getFakeDiscoNode(primary.routingEntry().currentNodeId()); + final DiscoveryNode rNode = getFakeDiscoNode(replica.routingEntry().currentNodeId()); + if (markAsRecovering) { + replica.markAsRecovering("remote", + new RecoveryState(replica.routingEntry(), pNode, rNode)); + } else { + assertEquals(replica.state(), IndexShardState.RECOVERING); + } + replica.prepareForIndexRecovery(); + RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode); + StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), pNode, rNode, + getMetadataSnapshotOrEmpty(replica), false, 0); + RecoverySourceHandler recovery = new RecoverySourceHandler(primary, recoveryTarget, request, () -> 0L, e -> () -> { + }, + (int) ByteSizeUnit.MB.toKB(1), logger); + recovery.recoverToTarget(); + recoveryTarget.markAsDone(); + replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry())); + }
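To make that javadoc concrete, here is a hedged sketch of such a custom recovery target (editor-added, not from the patch; it assumes RecoveryTarget.markAsDone() is overridable, and `doneCalled` is an illustrative name) that verifies recovery actually reached its final step:

    final AtomicBoolean doneCalled = new AtomicBoolean();   // java.util.concurrent.atomic
    recoverReplica(replica, primary,
        (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, version -> {}) {
            @Override
            public void markAsDone() {
                doneCalled.set(true);   // observe the final stage of the recovery
                super.markAsDone();
            }
        },
        true);
    assertTrue(doneCalled.get());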
+ + private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException { + Store.MetadataSnapshot result; + try { + result = replica.snapshotStoreMetadata(); + } catch (IndexNotFoundException e) { + // OK! + result = Store.MetadataSnapshot.EMPTY; + } catch (IOException e) { + logger.warn("failed to read store, treating as empty", e); + result = Store.MetadataSnapshot.EMPTY; + } + return result; + } + + protected Set getShardDocUIDs(final IndexShard shard) throws IOException { + shard.refresh("get_uids"); + try (Engine.Searcher searcher = shard.acquireSearcher("test")) { + Set ids = new HashSet<>(); + for (LeafReaderContext leafContext : searcher.reader().leaves()) { + LeafReader reader = leafContext.reader(); + Bits liveDocs = reader.getLiveDocs(); + for (int i = 0; i < reader.maxDoc(); i++) { + if (liveDocs == null || liveDocs.get(i)) { + Document uuid = reader.document(i, Collections.singleton(UidFieldMapper.NAME)); + ids.add(Uid.createUid(uuid.get(UidFieldMapper.NAME))); + } + } + } + return ids; + } + } + + protected void assertDocCount(IndexShard shard, int docCount) throws IOException { + assertThat(getShardDocUIDs(shard), hasSize(docCount)); + } + + protected void assertDocs(IndexShard shard, Uid... uids) throws IOException { + final Set shardDocUIDs = getShardDocUIDs(shard); + assertThat(shardDocUIDs, contains(uids)); + assertThat(shardDocUIDs, hasSize(uids.length)); + } + + + protected Engine.Index indexDoc(IndexShard shard, String type, String id) { + return indexDoc(shard, type, id, "{}"); + } + + protected Engine.Index indexDoc(IndexShard shard, String type, String id, String source) { + final Engine.Index index; + if (shard.routingEntry().primary()) { + index = shard.prepareIndexOnPrimary( + SourceToParse.source(SourceToParse.Origin.PRIMARY, shard.shardId().getIndexName(), type, id, new BytesArray(source)), + Versions.MATCH_ANY, VersionType.INTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); + } else { + index = shard.prepareIndexOnReplica( + SourceToParse.source(SourceToParse.Origin.PRIMARY, shard.shardId().getIndexName(), type, id, new BytesArray(source)), + 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); + } + shard.index(index); + return index; + } + + protected Engine.Delete deleteDoc(IndexShard shard, String type, String id) { + final Engine.Delete delete; + if (shard.routingEntry().primary()) { + delete = shard.prepareDeleteOnPrimary(type, id, Versions.MATCH_ANY, VersionType.INTERNAL); + } else { + delete = shard.prepareDeleteOnReplica(type, id, 1, VersionType.EXTERNAL); + } + shard.delete(delete); + return delete; + } + + protected void flushShard(IndexShard shard) { + flushShard(shard, false); + } + + protected void flushShard(IndexShard shard, boolean force) { + shard.flush(new FlushRequest(shard.shardId().getIndexName()).force(force)); + } +}
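A minimal end-to-end test built on this base class might look like the following (an editor's sketch, not part of the patch; the class and test names are made up, and everything it calls is defined above):

    public class MyShardTests extends IndexShardTestCase {
        public void testIndexAndDeleteRoundTrip() throws IOException {
            IndexShard primary = newStartedShard(true);       // started empty primary
            indexDoc(primary, "test", "0", "{\"foo\": \"bar\"}");
            assertDocCount(primary, 1);
            deleteDoc(primary, "test", "0");
            assertDocCount(primary, 0);
            closeShards(primary);                             // always release the shard and its store
        }
    }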
From 69bf08f6c6b12ccc22028f7b9a06f5d1673fac88 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 12 Sep 2016 12:06:29 -0400 Subject: [PATCH 08/18] Disable regexes by default in painless Adds a new node-level, non-dynamic setting, `script.painless.regex.enabled`, that can be used to enable regexes. Closes #20397 --- docs/build.gradle | 3 ++ .../modules/scripting/painless.asciidoc | 9 +++++ .../painless/CompilerSettings.java | 30 +++++++++++++++++ .../painless/PainlessPlugin.java | 13 ++++++-- .../painless/PainlessScriptEngineService.java | 29 ++++++++++------ .../elasticsearch/painless/antlr/Walker.java | 5 +++ .../elasticsearch/painless/RegexTests.java | 11 ++++++- .../painless/ScriptTestCase.java | 10 +++++- .../painless/WhenThingsGoWrongTests.java | 15 +++++++-- .../test/plan_a/40_disabled.yaml | 33 +++++++++++++++++++ 10 files changed, 141 insertions(+), 17 deletions(-) create mode 100644 modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/40_disabled.yaml diff --git a/docs/build.gradle b/docs/build.gradle index d930dfb5b60..6e2b02b0263 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -24,6 +24,9 @@ integTest { setting 'script.inline', 'true' setting 'script.stored', 'true' setting 'script.max_compilations_per_minute', '1000' + /* Enable regexes in painless so our tests don't complain about example + * snippets that use them. */ + setting 'script.painless.regex.enabled', 'true' Closure configFile = { extraConfigFile it, "src/test/cluster/config/$it" } diff --git a/docs/reference/modules/scripting/painless.asciidoc b/docs/reference/modules/scripting/painless.asciidoc index 308f5564f24..7995e45e7b9 100644 --- a/docs/reference/modules/scripting/painless.asciidoc +++ b/docs/reference/modules/scripting/painless.asciidoc @@ -196,6 +196,15 @@ POST hockey/player/1/_update [[modules-scripting-painless-regex]] === Regular expressions +NOTE: Regexes are disabled by default because they circumvent Painless's +protection against long-running and memory-hungry scripts. To make matters +worse, even innocuous-looking regexes can have staggering performance and stack +depth behavior. They remain an amazingly powerful tool but are too scary to enable +by default. To enable them yourself, set `script.painless.regex.enabled: true` in +`elasticsearch.yml`. We'd like very much to have a safe alternative +implementation that can be enabled by default, so check this space for later +developments! + Painless's native support for regular expressions has syntax constructs: * `/pattern/`: Pattern literals create patterns. This is the only way to create diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java index f0e1bde74d0..9ef1b2ccf12 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/CompilerSettings.java @@ -19,10 +19,18 @@ package org.elasticsearch.painless; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; + /** * Settings to use when compiling a script. */ public final class CompilerSettings { + /** + * Are regexes enabled? This is a node level setting because regexes break out of painless's lovely sandbox and can cause stack + * overflows and we can't analyze the regex to be sure it won't. + */ + public static final Setting REGEX_ENABLED = Setting.boolSetting("script.painless.regex.enabled", false, Property.NodeScope); /** * Constant to be used when specifying the maximum loop counter when compiling a script. @@ -55,6 +63,12 @@ public final class CompilerSettings { */ private int initialCallSiteDepth = 0; + /** + * Are regexes enabled? They are currently disabled by default because they break out of the loop counter and even fairly simple + * looking regexes can cause stack overflows. + */ + private boolean regexesEnabled = false; + /** * Returns the value for the cumulative total number of statements that can be made in all loops * in a script before an exception is thrown. This attempts to prevent infinite loops. Note if @@ -104,4 +118,20 @@ public final class CompilerSettings { public void setInitialCallSiteDepth(int depth) { this.initialCallSiteDepth = depth; } + + /** + * Are regexes enabled? They are currently disabled by default because they break out of the loop counter and even fairly simple + * looking regexes can cause stack overflows. + */ + public boolean areRegexesEnabled() { + return regexesEnabled; + } + + /** + * Are regexes enabled? They are currently disabled by default because they break out of the loop counter and even fairly simple + * looking regexes can cause stack overflows.
+ */ + public void setRegexesEnabled(boolean regexesEnabled) { + this.regexesEnabled = regexesEnabled; + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 954314286bc..c00dc643102 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -20,19 +20,21 @@ package org.elasticsearch.painless; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.ScriptEngineRegistry; import org.elasticsearch.script.ScriptEngineService; -import org.elasticsearch.script.ScriptModule; + +import java.util.Arrays; +import java.util.List; /** * Registers Painless as a plugin. */ public final class PainlessPlugin extends Plugin implements ScriptPlugin { - // force to pare our definition at startup (not on the user's first script) + // force to parse our definition at startup (not on the user's first script) static { Definition.VOID_TYPE.hashCode(); } @@ -41,4 +43,9 @@ public final class PainlessPlugin extends Plugin implements ScriptPlugin { public ScriptEngineService getScriptEngineService(Settings settings) { return new PainlessScriptEngineService(settings); } + + @Override + public List> getSettings() { + return Arrays.asList(CompilerSettings.REGEX_ENABLED); + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java index 834593aeb99..cc165343994 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngineService.java @@ -53,11 +53,6 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme */ public static final String NAME = "painless"; - /** - * Default compiler settings to be used. - */ - private static final CompilerSettings DEFAULT_COMPILER_SETTINGS = new CompilerSettings(); - /** * Permissions context used during compilation. */ @@ -74,12 +69,19 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme }); } + /** + * Default compiler settings to be used. Note that {@link CompilerSettings} is mutable but this instance shouldn't be mutated outside + * of {@link PainlessScriptEngineService#PainlessScriptEngineService(Settings)}. + */ + private final CompilerSettings defaultCompilerSettings = new CompilerSettings(); + /** * Constructor. * @param settings The settings to initialize the engine with. */ public PainlessScriptEngineService(final Settings settings) { super(settings); + defaultCompilerSettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(settings)); } /** @@ -111,29 +113,36 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme if (params.isEmpty()) { // Use the default settings. - compilerSettings = DEFAULT_COMPILER_SETTINGS; + compilerSettings = defaultCompilerSettings; } else { // Use custom settings specified by params. 
compilerSettings = new CompilerSettings(); - Map copy = new HashMap<>(params); - String value = copy.remove(CompilerSettings.MAX_LOOP_COUNTER); + // Except regexes enabled - this is a node level setting and can't be changed in the request. + compilerSettings.setRegexesEnabled(defaultCompilerSettings.areRegexesEnabled()); + + Map copy = new HashMap<>(params); + + String value = copy.remove(CompilerSettings.MAX_LOOP_COUNTER); if (value != null) { compilerSettings.setMaxLoopCounter(Integer.parseInt(value)); } value = copy.remove(CompilerSettings.PICKY); - if (value != null) { compilerSettings.setPicky(Boolean.parseBoolean(value)); } value = copy.remove(CompilerSettings.INITIAL_CALL_SITE_DEPTH); - if (value != null) { compilerSettings.setInitialCallSiteDepth(Integer.parseInt(value)); } + value = copy.remove(CompilerSettings.REGEX_ENABLED.getKey()); + if (value != null) { + throw new IllegalArgumentException("[painless.regex.enabled] can only be set on node startup."); + } + if (!copy.isEmpty()) { throw new IllegalArgumentException("Unrecognized compile-time parameter(s): " + copy); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 61269419fdf..da430f4280a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -796,6 +796,11 @@ public final class Walker extends PainlessParserBaseVisitor { @Override public ANode visitRegex(RegexContext ctx) { + if (false == settings.areRegexesEnabled()) { + throw location(ctx).createError(new IllegalStateException("Regexes are disabled. Set [script.painless.regex.enabled] to [true] " + + "in elasticsearch.yaml to allow them. Be careful though, regexes break out of Painless's protection against deep " + + "recursion and long loops.")); + } String text = ctx.REGEX().getText(); int lastSlash = text.lastIndexOf('/'); String pattern = text.substring(1, lastSlash); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java index dbbb9958d71..1c53692ad74 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java @@ -19,17 +19,26 @@ package org.elasticsearch.painless; +import org.elasticsearch.common.settings.Settings; + import java.nio.CharBuffer; import java.util.Arrays; import java.util.HashSet; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; -import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.containsString; public class RegexTests extends ScriptTestCase { + @Override + protected Settings scriptEngineSettings() { + // Enable regexes just for this test. They are disabled by default. 
+ return Settings.builder() + .put(CompilerSettings.REGEX_ENABLED.getKey(), true) + .build(); + } + public void testPatternAfterReturn() { assertEquals(true, exec("return 'foo' ==~ /foo/")); assertEquals(false, exec("return 'bar' ==~ /foo/")); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 63c929a69a7..672204cbc25 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -45,7 +45,14 @@ public abstract class ScriptTestCase extends ESTestCase { @Before public void setup() { - scriptEngine = new PainlessScriptEngineService(Settings.EMPTY); + scriptEngine = new PainlessScriptEngineService(scriptEngineSettings()); + } + + /** + * Settings used to build the script engine. Override to customize settings like {@link RegexTests} does to enable regexes. + */ + protected Settings scriptEngineSettings() { + return Settings.EMPTY; } /** Compiles and returns the result of {@code script} */ @@ -71,6 +78,7 @@ public abstract class ScriptTestCase extends ESTestCase { if (picky) { CompilerSettings pickySettings = new CompilerSettings(); pickySettings.setPicky(true); + pickySettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(scriptEngineSettings())); Walker.buildPainlessTree(getTestName(), script, pickySettings, null); } // test actual script execution diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java index 1d60eb9c29a..7e4311f24ec 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java @@ -20,14 +20,13 @@ package org.elasticsearch.painless; import org.apache.lucene.util.Constants; -import org.elasticsearch.script.ScriptException; import java.lang.invoke.WrongMethodTypeException; import java.util.Arrays; import java.util.Collections; import static java.util.Collections.emptyMap; -import static org.hamcrest.Matchers.containsString; +import static java.util.Collections.singletonMap; public class WhenThingsGoWrongTests extends ScriptTestCase { public void testNullPointer() { @@ -234,4 +233,16 @@ public class WhenThingsGoWrongTests extends ScriptTestCase { exec("void recurse(int x, int y) {recurse(x, y)} recurse(1, 2);"); }); } + + public void testRegexDisabledByDefault() { + IllegalStateException e = expectThrows(IllegalStateException.class, () -> exec("return 'foo' ==~ /foo/")); + assertEquals("Regexes are disabled. Set [script.painless.regex.enabled] to [true] in elasticsearch.yaml to allow them. 
" + + "Be careful though, regexes break out of Painless's protection against deep recursion and long loops.", e.getMessage()); + } + + public void testCanNotOverrideRegexEnabled() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> exec("", null, singletonMap(CompilerSettings.REGEX_ENABLED.getKey(), "true"), null, false)); + assertEquals("[painless.regex.enabled] can only be set on node startup.", e.getMessage()); + } } diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/40_disabled.yaml b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/40_disabled.yaml new file mode 100644 index 00000000000..bcf02f657b0 --- /dev/null +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/plan_a/40_disabled.yaml @@ -0,0 +1,33 @@ +--- +"Regex in update fails": + + - do: + index: + index: test_1 + type: test + id: 1 + body: + foo: bar + count: 1 + + - do: + catch: /Regexes are disabled. Set \[script.painless.regex.enabled\] to \[true\] in elasticsearch.yaml to allow them. Be careful though, regexes break out of Painless's protection against deep recursion and long loops./ + update: + index: test_1 + type: test + id: 1 + body: + script: + lang: painless + inline: "ctx._source.foo = params.bar ==~ /cat/" + params: { bar: 'xxx' } + +--- +"Regex enabled is not a dynamic setting": + + - do: + catch: /setting \[script.painless.regex.enabled\], not dynamically updateable/ + cluster.put_settings: + body: + transient: + script.painless.regex.enabled: true From b1e87aa13cb0590ebeb58cea4673e0e042e1185f Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Mon, 12 Sep 2016 16:21:39 -0400 Subject: [PATCH 09/18] Split allocator decision making from decision application (#20347) Splits the PrimaryShardAllocator and ReplicaShardAllocator's decision making for a shard from the implementation of that decision on the routing table. This is a step toward making it easier to use the same logic for the cluster allocation explain APIs. --- .../allocation/UnassignedShardDecision.java | 205 ++++++++++++ .../gateway/BaseGatewayShardAllocator.java | 88 +++++ .../gateway/PrimaryShardAllocator.java | 300 +++++++++++------- .../gateway/ReplicaShardAllocator.java | 206 ++++++------ .../UnassignedShardDecisionTests.java | 116 +++++++ .../cluster/ESAllocationTestCase.java | 15 +- 6 files changed, 714 insertions(+), 216 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecision.java create mode 100644 core/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java create mode 100644 core/src/test/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecisionTests.java diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecision.java new file mode 100644 index 00000000000..172360849fa --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecision.java @@ -0,0 +1,205 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.common.Nullable; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +/** + * Represents the allocation decision by an allocator for an unassigned shard. + */ +public class UnassignedShardDecision { + /** a constant representing a shard decision where no decision was taken */ + public static final UnassignedShardDecision DECISION_NOT_TAKEN = + new UnassignedShardDecision(null, null, null, null, null, null); + + @Nullable + private final Decision finalDecision; + @Nullable + private final AllocationStatus allocationStatus; + @Nullable + private final String finalExplanation; + @Nullable + private final String assignedNodeId; + @Nullable + private final String allocationId; + @Nullable + private final Map nodeDecisions; + + private UnassignedShardDecision(Decision finalDecision, + AllocationStatus allocationStatus, + String finalExplanation, + String assignedNodeId, + String allocationId, + Map nodeDecisions) { + assert finalExplanation != null || finalDecision == null : + "if a decision was taken, there must be an explanation for it"; + assert assignedNodeId != null || finalDecision == null || finalDecision.type() != Type.YES : + "a yes decision must have a node to assign the shard to"; + assert allocationStatus != null || finalDecision == null || finalDecision.type() == Type.YES : + "only a yes decision should not have an allocation status"; + assert allocationId == null || assignedNodeId != null : + "allocation id can only be null if the assigned node is null"; + this.finalDecision = finalDecision; + this.allocationStatus = allocationStatus; + this.finalExplanation = finalExplanation; + this.assignedNodeId = assignedNodeId; + this.allocationId = allocationId; + this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null; + } + + /** + * Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision. + */ + public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus, String explanation) { + return noDecision(allocationStatus, explanation, null); + } + + /** + * Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision, + * as well as the individual node-level decisions that comprised the final NO decision. 
+ */ + public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus, + String explanation, + @Nullable Map nodeDecisions) { + Objects.requireNonNull(explanation, "explanation must not be null"); + Objects.requireNonNull(allocationStatus, "allocationStatus must not be null"); + return new UnassignedShardDecision(Decision.NO, allocationStatus, explanation, null, null, nodeDecisions); + } + + /** + * Creates a THROTTLE decision with the given explanation and individual node-level decisions that + * comprised the final THROTTLE decision. + */ + public static UnassignedShardDecision throttleDecision(String explanation, + Map nodeDecisions) { + Objects.requireNonNull(explanation, "explanation must not be null"); + return new UnassignedShardDecision(Decision.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, explanation, null, null, + nodeDecisions); + } + + /** + * Creates a YES decision with the given explanation and individual node-level decisions that + * comprised the final YES decision, along with the node id to which the shard is assigned and + * the allocation id for the shard, if available. + */ + public static UnassignedShardDecision yesDecision(String explanation, + String assignedNodeId, + @Nullable String allocationId, + Map nodeDecisions) { + Objects.requireNonNull(explanation, "explanation must not be null"); + Objects.requireNonNull(assignedNodeId, "assignedNodeId must not be null"); + return new UnassignedShardDecision(Decision.YES, null, explanation, assignedNodeId, allocationId, nodeDecisions); + } + + /** + * Returns true if a decision was taken by the allocator, {@code false} otherwise. + * If no decision was taken, then the rest of the fields in this object are meaningless and return {@code null}. + */ + public boolean isDecisionTaken() { + return finalDecision != null; + } + + /** + * Returns the final decision made by the allocator on whether to assign the unassigned shard. + * This value can only be {@code null} if {@link #isDecisionTaken()} returns {@code false}. + */ + @Nullable + public Decision getFinalDecision() { + return finalDecision; + } + + /** + * Returns the final decision made by the allocator on whether to assign the unassigned shard. + * Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will + * throw an {@code IllegalArgumentException}. + */ + public Decision getFinalDecisionSafe() { + if (isDecisionTaken() == false) { + throw new IllegalArgumentException("decision must have been taken in order to return the final decision"); + } + return finalDecision; + } + + /** + * Returns the status of an unsuccessful allocation attempt. This value will be {@code null} if + * no decision was taken or if the decision was {@link Decision.Type#YES}. + */ + @Nullable + public AllocationStatus getAllocationStatus() { + return allocationStatus; + } + + /** + * Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}. + */ + @Nullable + public String getFinalExplanation() { + return finalExplanation; + } + + /** + * Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}. + * Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will + * throw an {@code IllegalArgumentException}. 
+ */ + public String getFinalExplanationSafe() { + if (isDecisionTaken() == false) { + throw new IllegalArgumentException("decision must have been taken in order to return the final explanation"); + } + return finalExplanation; + } + + /** + * Get the node id that the allocator will assign the shard to, unless {@link #getFinalDecision()} returns + * a value other than {@link Decision.Type#YES}, in which case this returns {@code null}. + */ + @Nullable + public String getAssignedNodeId() { + return assignedNodeId; + } + + /** + * Gets the allocation id for the existing shard copy that the allocator is assigning the shard to. + * This method returns a non-null value iff {@link #getAssignedNodeId()} returns a non-null value + * and the node on which the shard is assigned already has a shard copy with an in-sync allocation id + * that we can re-use. + */ + @Nullable + public String getAllocationId() { + return allocationId; + } + + /** + * Gets the individual node-level decisions that went into making the final decision as represented by + * {@link #getFinalDecision()}. The map that is returned has the node id as the key and a {@link Decision} + * as the decision for the given node. + */ + @Nullable + public Map getNodeDecisions() { + return nodeDecisions; + } +}
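For orientation, here is an editor's sketch (not part of the patch; the node id, allocation id, and explanation strings are invented) of how the factory methods and getters above compose:

    Map<String, Decision> nodeDecisions = new HashMap<>();   // per-node results from the deciders
    nodeDecisions.put("node1", Decision.YES);
    UnassignedShardDecision yes = UnassignedShardDecision.yesDecision(
            "found an in-sync shard copy to recover from", "node1", "some-allocation-id", nodeDecisions);
    assert yes.isDecisionTaken();
    assert yes.getFinalDecision().type() == Decision.Type.YES;
    assert "node1".equals(yes.getAssignedNodeId());
    // a NO decision instead carries an AllocationStatus and no node assignment:
    UnassignedShardDecision no = UnassignedShardDecision.noDecision(
            AllocationStatus.FETCHING_SHARD_DATA, "still fetching shard state from the nodes in the cluster");
    assert no.getAssignedNodeId() == null;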
diff --git a/core/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java new file mode 100644 index 00000000000..3874d54f457 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gateway; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; + +/** + * An abstract class that implements basic functionality for allocating + * shards to nodes based on shard copies that already exist in the cluster. + * + * Individual implementations of this class are responsible for providing + * the logic to determine to which nodes (if any) those shards are allocated. + */ +public abstract class BaseGatewayShardAllocator extends AbstractComponent { + + public BaseGatewayShardAllocator(Settings settings) { + super(settings); + } + + /** + * Allocate unassigned shards to nodes (if any) where valid copies of the shard already exist. + * It is up to the individual implementations of {@link #makeAllocationDecision(ShardRouting, RoutingAllocation, Logger)} + * to make decisions on assigning shards to nodes. + * + * @param allocation the allocation state container object + */ + public void allocateUnassigned(RoutingAllocation allocation) { + final RoutingNodes routingNodes = allocation.routingNodes(); + final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); + while (unassignedIterator.hasNext()) { + final ShardRouting shard = unassignedIterator.next(); + final UnassignedShardDecision unassignedShardDecision = makeAllocationDecision(shard, allocation, logger); + + if (unassignedShardDecision.isDecisionTaken() == false) { + // no decision was taken by this allocator + continue; + } + + if (unassignedShardDecision.getFinalDecisionSafe().type() == Decision.Type.YES) { + unassignedIterator.initialize(unassignedShardDecision.getAssignedNodeId(), + unassignedShardDecision.getAllocationId(), + shard.primary() ? ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE : + allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), + allocation.changes()); + } else { + unassignedIterator.removeAndIgnore(unassignedShardDecision.getAllocationStatus(), allocation.changes()); + } + } + } + + /** + * Make a decision on the allocation of an unassigned shard. This method is used by + * {@link #allocateUnassigned(RoutingAllocation)} to make decisions about whether or not + * the shard can be allocated by this allocator and if so, to which node it will be allocated. + * + * @param unassignedShard the unassigned shard to allocate + * @param allocation the current routing state + * @param logger the logger + * @return an {@link UnassignedShardDecision} with the final decision of whether to allocate and details of the decision + */ + public abstract UnassignedShardDecision makeAllocationDecision(ShardRouting unassignedShard, + RoutingAllocation allocation, + Logger logger); +} diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index a11300ac496..38afa11f5bd 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -19,12 +19,12 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RoutingNode; @@ -32,19 +32,23 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision; import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; import
org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.AsyncShardFetch.FetchResult; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import org.elasticsearch.index.shard.ShardStateMetaData; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -62,7 +66,7 @@ import java.util.stream.Collectors; * nor does it allocate primaries when a primary shard failed and there is a valid replica * copy that can immediately be promoted to primary, as this takes place in {@link RoutingNodes#failShard}. */ -public abstract class PrimaryShardAllocator extends AbstractComponent { +public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { private static final Function INITIAL_SHARDS_PARSER = (value) -> { switch (value) { @@ -94,110 +98,161 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { logger.debug("using initial_shards [{}]", NODE_INITIAL_SHARDS_SETTING.get(settings)); } - public void allocateUnassigned(RoutingAllocation allocation) { - final RoutingNodes routingNodes = allocation.routingNodes(); - final MetaData metaData = allocation.metaData(); + /** + * Is the allocator responsible for allocating the given {@link ShardRouting}? + */ + private static boolean isResponsibleFor(final ShardRouting shard) { + return shard.primary() // must be primary + && shard.unassigned() // must be unassigned + // only handle either an existing store or a snapshot recovery + && (shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE + || shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT); + } - final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); - while (unassignedIterator.hasNext()) { - final ShardRouting shard = unassignedIterator.next(); + @Override + public UnassignedShardDecision makeAllocationDecision(final ShardRouting unassignedShard, + final RoutingAllocation allocation, + final Logger logger) { + if (isResponsibleFor(unassignedShard) == false) { + // this allocator is not responsible for allocating this shard + return UnassignedShardDecision.DECISION_NOT_TAKEN; + } - if (shard.primary() == false) { - continue; - } + final boolean explain = allocation.debugDecision(); + final FetchResult shardState = fetchData(unassignedShard, allocation); + if (shardState.hasData() == false) { + allocation.setHasPendingAsyncFetch(); + return UnassignedShardDecision.noDecision(AllocationStatus.FETCHING_SHARD_DATA, + "still fetching shard state from the nodes in the cluster"); + } - if (shard.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE && - shard.recoverySource().getType() != RecoverySource.Type.SNAPSHOT) { - continue; - } + // don't create a new IndexSetting object for every shard as this could cause a lot of garbage + // on cluster restart if we allocate a boat load of shards + final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(unassignedShard.index()); + final Set inSyncAllocationIds = indexMetaData.inSyncAllocationIds(unassignedShard.id()); + final boolean snapshotRestore = unassignedShard.recoverySource().getType() == 
RecoverySource.Type.SNAPSHOT; + final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData); - final AsyncShardFetch.FetchResult shardState = fetchData(shard, allocation); - if (shardState.hasData() == false) { - logger.trace("{}: ignoring allocation, still fetching shard started state", shard); - allocation.setHasPendingAsyncFetch(); - unassignedIterator.removeAndIgnore(AllocationStatus.FETCHING_SHARD_DATA, allocation.changes()); - continue; - } + final NodeShardsResult nodeShardsResult; + final boolean enoughAllocationsFound; - // don't create a new IndexSetting object for every shard as this could cause a lot of garbage - // on cluster restart if we allocate a boat load of shards - final IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index()); - final Set inSyncAllocationIds = indexMetaData.inSyncAllocationIds(shard.id()); - final boolean snapshotRestore = shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT; - final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData); - - final NodeShardsResult nodeShardsResult; - final boolean enoughAllocationsFound; - - if (inSyncAllocationIds.isEmpty()) { - assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) : "trying to allocated a primary with an empty allocation id set, but index is new"; - // when we load an old index (after upgrading cluster) or restore a snapshot of an old index - // fall back to old version-based allocation mode - // Note that once the shard has been active, lastActiveAllocationIds will be non-empty - nodeShardsResult = buildVersionBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState); - if (snapshotRestore || recoverOnAnyNode) { - enoughAllocationsFound = nodeShardsResult.allocationsFound > 0; - } else { - enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult); - } - logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, shard); + if (inSyncAllocationIds.isEmpty()) { + assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) : + "trying to allocated a primary with an empty allocation id set, but index is new"; + // when we load an old index (after upgrading cluster) or restore a snapshot of an old index + // fall back to old version-based allocation mode + // Note that once the shard has been active, lastActiveAllocationIds will be non-empty + nodeShardsResult = buildVersionBasedNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode, + allocation.getIgnoreNodes(unassignedShard.shardId()), shardState, logger); + if (snapshotRestore || recoverOnAnyNode) { + enoughAllocationsFound = nodeShardsResult.allocationsFound > 0; } else { - assert inSyncAllocationIds.isEmpty() == false; - // use allocation ids to select nodes - nodeShardsResult = buildAllocationIdBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode, - allocation.getIgnoreNodes(shard.shardId()), inSyncAllocationIds, shardState); - enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0; - logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodeShardsResult.orderedAllocationCandidates.size(), shard, inSyncAllocationIds); + enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult); } + 
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", unassignedShard.index(), + unassignedShard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, unassignedShard); + } else { + assert inSyncAllocationIds.isEmpty() == false; + // use allocation ids to select nodes + nodeShardsResult = buildAllocationIdBasedNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode, + allocation.getIgnoreNodes(unassignedShard.shardId()), inSyncAllocationIds, shardState, logger); + enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0; + logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", unassignedShard.index(), + unassignedShard.id(), nodeShardsResult.orderedAllocationCandidates.size(), unassignedShard, inSyncAllocationIds); + } - if (enoughAllocationsFound == false){ - if (snapshotRestore) { - // let BalancedShardsAllocator take care of allocating this shard - logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.recoverySource()); - } else if (recoverOnAnyNode) { - // let BalancedShardsAllocator take care of allocating this shard - logger.debug("[{}][{}]: missing local data, recover from any node", shard.index(), shard.id()); - } else { - // we can't really allocate, so ignore it and continue - unassignedIterator.removeAndIgnore(AllocationStatus.NO_VALID_SHARD_COPY, allocation.changes()); - logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodeShardsResult.allocationsFound); - } - continue; - } - - final NodesToAllocate nodesToAllocate = buildNodesToAllocate( - allocation, nodeShardsResult.orderedAllocationCandidates, shard, false - ); - if (nodesToAllocate.yesNodeShards.isEmpty() == false) { - NodeGatewayStartedShards nodeShardState = nodesToAllocate.yesNodeShards.get(0); - logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodeShardState.getNode()); - unassignedIterator.initialize(nodeShardState.getNode().getId(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation.changes()); - } else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) { - // The deciders returned a NO decision for all nodes with shard copies, so we check if primary shard - // can be force-allocated to one of the nodes. 
- final NodesToAllocate nodesToForceAllocate = buildNodesToAllocate( - allocation, nodeShardsResult.orderedAllocationCandidates, shard, true - ); - if (nodesToForceAllocate.yesNodeShards.isEmpty() == false) { - NodeGatewayStartedShards nodeShardState = nodesToForceAllocate.yesNodeShards.get(0); - logger.debug("[{}][{}]: allocating [{}] to [{}] on forced primary allocation", - shard.index(), shard.id(), shard, nodeShardState.getNode()); - unassignedIterator.initialize(nodeShardState.getNode().getId(), nodeShardState.allocationId(), - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation.changes()); - } else if (nodesToForceAllocate.throttleNodeShards.isEmpty() == false) { - logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on forced primary allocation", - shard.index(), shard.id(), shard, nodesToForceAllocate.throttleNodeShards); - unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_THROTTLED, allocation.changes()); - } else { - logger.debug("[{}][{}]: forced primary allocation denied [{}]", shard.index(), shard.id(), shard); - unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_NO, allocation.changes()); - } + if (enoughAllocationsFound == false) { + if (snapshotRestore) { + // let BalancedShardsAllocator take care of allocating this shard + logger.debug("[{}][{}]: missing local data, will restore from [{}]", + unassignedShard.index(), unassignedShard.id(), unassignedShard.recoverySource()); + return UnassignedShardDecision.DECISION_NOT_TAKEN; + } else if (recoverOnAnyNode) { + // let BalancedShardsAllocator take care of allocating this shard + logger.debug("[{}][{}]: missing local data, recover from any node", unassignedShard.index(), unassignedShard.id()); + return UnassignedShardDecision.DECISION_NOT_TAKEN; } else { - // we are throttling this, but we have enough to allocate to this node, ignore it for now - logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodeShards); - unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_THROTTLED, allocation.changes()); + // We have a shard that was previously allocated, but we could not find a valid shard copy to allocate the primary. 
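The yes/throttle/no branches in this method all funnel into one of three UnassignedShardDecision factories, which is what allocateUnassigned keys off. A minimal sketch of their observable behavior, consistent with the assertions in UnassignedShardDecisionTests further below (the explanation strings here are invented):

```java
import java.util.Collections;

import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;

public class UnassignedShardDecisionExamples {
    public static void main(String[] args) {
        // NO: a decision was taken; the shard stays unassigned, tagged with a status
        UnassignedShardDecision no = UnassignedShardDecision.noDecision(
                AllocationStatus.NO_VALID_SHARD_COPY, "no valid copy found on any node");
        assert no.isDecisionTaken() && no.getFinalDecision().type() == Decision.Type.NO;

        // THROTTLE: implies AllocationStatus.DECIDERS_THROTTLED; retried on a later reroute
        UnassignedShardDecision throttle = UnassignedShardDecision.throttleDecision(
                "candidate nodes are busy with other recoveries", Collections.emptyMap());
        assert throttle.getAllocationStatus() == AllocationStatus.DECIDERS_THROTTLED;

        // YES: carries the chosen node id and, optionally, the allocation id to re-use
        UnassignedShardDecision yes = UnassignedShardDecision.yesDecision(
                "node [node1] holds a full copy", "node1", null, Collections.emptyMap());
        assert "node1".equals(yes.getAssignedNodeId()) && yes.getAllocationId() == null;
    }
}
```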
+ // We could just be waiting for the node that holds the primary to start back up, in which case the allocation for + // this shard will be picked up when the node joins and we do another allocation reroute + logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", + unassignedShard.index(), unassignedShard.id(), nodeShardsResult.allocationsFound); + return UnassignedShardDecision.noDecision(AllocationStatus.NO_VALID_SHARD_COPY, + "shard was previously allocated, but no valid shard copy could be found amongst the current nodes in the cluster"); } } + + final NodesToAllocate nodesToAllocate = buildNodesToAllocate( + allocation, nodeShardsResult.orderedAllocationCandidates, unassignedShard, false + ); + if (nodesToAllocate.yesNodeShards.isEmpty() == false) { + DecidedNode decidedNode = nodesToAllocate.yesNodeShards.get(0); + logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", + unassignedShard.index(), unassignedShard.id(), unassignedShard, decidedNode.nodeShardState.getNode()); + final String nodeId = decidedNode.nodeShardState.getNode().getId(); + return UnassignedShardDecision.yesDecision( + "the allocation deciders returned a YES decision to allocate to node [" + nodeId + "]", + nodeId, decidedNode.nodeShardState.allocationId(), buildNodeDecisions(nodesToAllocate, explain)); + } else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) { + // The deciders returned a NO decision for all nodes with shard copies, so we check if primary shard + // can be force-allocated to one of the nodes. + final NodesToAllocate nodesToForceAllocate = buildNodesToAllocate( + allocation, nodeShardsResult.orderedAllocationCandidates, unassignedShard, true + ); + if (nodesToForceAllocate.yesNodeShards.isEmpty() == false) { + final DecidedNode decidedNode = nodesToForceAllocate.yesNodeShards.get(0); + final NodeGatewayStartedShards nodeShardState = decidedNode.nodeShardState; + logger.debug("[{}][{}]: allocating [{}] to [{}] on forced primary allocation", + unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeShardState.getNode()); + final String nodeId = nodeShardState.getNode().getId(); + return UnassignedShardDecision.yesDecision( + "allocating the primary shard to node [" + nodeId+ "], which has a complete copy of the shard data", + nodeId, + nodeShardState.allocationId(), + buildNodeDecisions(nodesToForceAllocate, explain)); + } else if (nodesToForceAllocate.throttleNodeShards.isEmpty() == false) { + logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on forced primary allocation", + unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToForceAllocate.throttleNodeShards); + return UnassignedShardDecision.throttleDecision( + "allocation throttled as all nodes to which the shard may be force allocated are busy with other recoveries", + buildNodeDecisions(nodesToForceAllocate, explain)); + } else { + logger.debug("[{}][{}]: forced primary allocation denied [{}]", + unassignedShard.index(), unassignedShard.id(), unassignedShard); + return UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, + "all nodes that hold a valid shard copy returned a NO decision, and force allocation is not permitted", + buildNodeDecisions(nodesToForceAllocate, explain)); + } + } else { + // we are throttling this, since we are allowed to allocate to this node but there are enough allocations + // taking place on the node currently, ignore it for now + logger.debug("[{}][{}]: throttling 
allocation [{}] to [{}] on primary allocation", + unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToAllocate.throttleNodeShards); + return UnassignedShardDecision.throttleDecision( + "allocation throttled as all nodes to which the shard may be allocated are busy with other recoveries", + buildNodeDecisions(nodesToAllocate, explain)); + } + } + + /** + * Builds a map of nodes to the corresponding allocation decisions for those nodes. + */ + private static Map buildNodeDecisions(NodesToAllocate nodesToAllocate, boolean explain) { + if (explain == false) { + // not in explain mode, no need to return node level decisions + return null; + } + Map nodeDecisions = new LinkedHashMap<>(); + for (final DecidedNode decidedNode : nodesToAllocate.yesNodeShards) { + nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision); + } + for (final DecidedNode decidedNode : nodesToAllocate.throttleNodeShards) { + nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision); + } + for (final DecidedNode decidedNode : nodesToAllocate.noNodeShards) { + nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision); + } + return nodeDecisions; } /** @@ -205,8 +260,10 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { * lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but * entries with matching allocation id are always at the front of the list. */ - protected NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set ignoreNodes, - Set lastActiveAllocationIds, AsyncShardFetch.FetchResult shardState) { + protected static NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, + Set ignoreNodes, Set lastActiveAllocationIds, + FetchResult shardState, + Logger logger) { LinkedList matchingNodeShardStates = new LinkedList<>(); LinkedList nonMatchingNodeShardStates = new LinkedList<>(); int numberOfAllocationsFound = 0; @@ -299,9 +356,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { List nodeShardStates, ShardRouting shardRouting, boolean forceAllocate) { - List yesNodeShards = new ArrayList<>(); - List throttledNodeShards = new ArrayList<>(); - List noNodeShards = new ArrayList<>(); + List yesNodeShards = new ArrayList<>(); + List throttledNodeShards = new ArrayList<>(); + List noNodeShards = new ArrayList<>(); for (NodeGatewayStartedShards nodeShardState : nodeShardStates) { RoutingNode node = allocation.routingNodes().node(nodeShardState.getNode().getId()); if (node == null) { @@ -310,12 +367,13 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { Decision decision = forceAllocate ? 
allocation.deciders().canForceAllocatePrimary(shardRouting, node, allocation) : allocation.deciders().canAllocate(shardRouting, node, allocation); - if (decision.type() == Decision.Type.THROTTLE) { - throttledNodeShards.add(nodeShardState); - } else if (decision.type() == Decision.Type.NO) { - noNodeShards.add(nodeShardState); + DecidedNode decidedNode = new DecidedNode(nodeShardState, decision); + if (decision.type() == Type.THROTTLE) { + throttledNodeShards.add(decidedNode); + } else if (decision.type() == Type.NO) { + noNodeShards.add(decidedNode); } else { - yesNodeShards.add(nodeShardState); + yesNodeShards.add(decidedNode); } } return new NodesToAllocate(Collections.unmodifiableList(yesNodeShards), Collections.unmodifiableList(throttledNodeShards), Collections.unmodifiableList(noNodeShards)); @@ -325,8 +383,8 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { * Builds a list of previously started shards. If matchAnyShard is set to false, only shards with the highest shard version are added to * the list. Otherwise, any existing shard is added to the list, but entries with highest version are always at the front of the list. */ - NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set ignoreNodes, - AsyncShardFetch.FetchResult shardState) { + static NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set ignoreNodes, + FetchResult shardState, Logger logger) { final List allocationCandidates = new ArrayList<>(); int numberOfAllocationsFound = 0; long highestVersion = ShardStateMetaData.NO_VERSION; @@ -400,7 +458,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { && IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING.get(metaData.getSettings(), this.settings); } - protected abstract AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation); + protected abstract FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation); static class NodeShardsResult { public final List orderedAllocationCandidates; @@ -413,16 +471,28 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } static class NodesToAllocate { - final List yesNodeShards; - final List throttleNodeShards; - final List noNodeShards; + final List yesNodeShards; + final List throttleNodeShards; + final List noNodeShards; - public NodesToAllocate(List yesNodeShards, - List throttleNodeShards, - List noNodeShards) { + public NodesToAllocate(List yesNodeShards, List throttleNodeShards, List noNodeShards) { this.yesNodeShards = yesNodeShards; this.throttleNodeShards = throttleNodeShards; this.noNodeShards = noNodeShards; } } + + /** + * This class encapsulates the shard state retrieved from a node and the decision that was made + * by the allocator for allocating to the node that holds the shard copy. 
+ */ + private static class DecidedNode { + final NodeGatewayStartedShards nodeShardState; + final Decision decision; + + private DecidedNode(NodeGatewayStartedShards nodeShardState, Decision decision) { + this.nodeShardState = nodeShardState; + this.decision = decision; + } + } } diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index d2fbeee5776..8f90e072ed2 100644 --- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -23,7 +23,7 @@ import com.carrotsearch.hppc.ObjectLongHashMap; import com.carrotsearch.hppc.ObjectLongMap; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectLongCursor; -import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; @@ -31,24 +31,25 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; -import org.elasticsearch.cluster.routing.RoutingChangesObserver; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; /** */ -public abstract class ReplicaShardAllocator extends AbstractComponent { +public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { public ReplicaShardAllocator(Settings settings) { super(settings); @@ -96,7 +97,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { continue; } - MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores); + MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores, false); if (matchingNodes.getNodeWithHighestMatch() != null) { DiscoveryNode currentNode = allocation.nodes().get(shard.currentNodeId()); DiscoveryNode nodeWithHighestMatch = matchingNodes.getNodeWithHighestMatch(); @@ -128,86 +129,88 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { } } - public void allocateUnassigned(RoutingAllocation allocation) { - final RoutingNodes routingNodes = allocation.routingNodes(); - final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); - while (unassignedIterator.hasNext()) { - ShardRouting shard = unassignedIterator.next(); - if (shard.primary()) { - continue; - } - - // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... 
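A note on the DecidedNode holder defined above: it records the decider's verdict next to the node's shard state at the moment the verdict is computed, so explain mode can report per-node decisions later without re-running the deciders. A simplified, hypothetical rendering of the same pattern (plain strings stand in for NodeGatewayStartedShards and Decision):

```java
import java.util.ArrayList;
import java.util.List;

// Hypothetical, simplified rendering of the DecidedNode pattern. The verdict
// is computed once per candidate node and kept next to that node's state, so
// an explanation can be produced later without consulting the deciders again.
final class DecidedCandidate {
    final String nodeState; // stands in for NodeGatewayStartedShards
    final String verdict;   // stands in for Decision: "YES", "THROTTLE" or "NO"

    DecidedCandidate(String nodeState, String verdict) {
        this.nodeState = nodeState;
        this.verdict = verdict;
    }
}

final class CandidateBuckets {
    final List<DecidedCandidate> yes = new ArrayList<>();
    final List<DecidedCandidate> throttled = new ArrayList<>();
    final List<DecidedCandidate> no = new ArrayList<>();

    // mirrors buildNodesToAllocate: bucket by verdict, keeping state and verdict paired
    void add(DecidedCandidate candidate) {
        switch (candidate.verdict) {
            case "THROTTLE": throttled.add(candidate); break;
            case "NO":       no.add(candidate);        break;
            default:         yes.add(candidate);       break;
        }
    }
}
```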
- if (shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) { - continue; - } - - // pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing - Decision decision = canBeAllocatedToAtLeastOneNode(shard, allocation); - if (decision.type() != Decision.Type.YES) { - logger.trace("{}: ignoring allocation, can't be allocated on any node", shard); - unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.fromDecision(decision), allocation.changes()); - continue; - } - - AsyncShardFetch.FetchResult shardStores = fetchData(shard, allocation); - if (shardStores.hasData() == false) { - logger.trace("{}: ignoring allocation, still fetching shard stores", shard); - allocation.setHasPendingAsyncFetch(); - unassignedIterator.removeAndIgnore(AllocationStatus.FETCHING_SHARD_DATA, allocation.changes()); - continue; // still fetching - } - - ShardRouting primaryShard = routingNodes.activePrimary(shard.shardId()); - assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary"; - TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore = findStore(primaryShard, allocation, shardStores); - if (primaryStore == null) { - // if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed) - // we want to let the replica be allocated in order to expose the actual problem with the primary that the replica - // will try and recover from - // Note, this is the existing behavior, as exposed in running CorruptFileTest#testNoPrimaryData - logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", shard); - continue; - } - - MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores); - - if (matchingNodes.getNodeWithHighestMatch() != null) { - RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId()); - // we only check on THROTTLE since we checked before before on NO - decision = allocation.deciders().canAllocate(shard, nodeWithHighestMatch, allocation); - if (decision.type() == Decision.Type.THROTTLE) { - logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node()); - // we are throttling this, but we have enough to allocate to this node, ignore it for now - unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.fromDecision(decision), allocation.changes()); - } else { - logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node()); - // we found a match - unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), null, allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); - } - } else if (matchingNodes.hasAnyData() == false) { - // if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation of the replica shard needs to be delayed - ignoreUnassignedIfDelayed(unassignedIterator, shard, allocation.changes()); - } - } + /** + * Is the allocator responsible for allocating the given {@link ShardRouting}? 
+ */ + private static boolean isResponsibleFor(final ShardRouting shard) { + return shard.primary() == false // must be a replica + && shard.unassigned() // must be unassigned + // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... + && shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED; } - /** - * Check if the allocation of the replica is to be delayed. Compute the delay and if it is delayed, add it to the ignore unassigned list - * Note: we only care about replica in delayed allocation, since if we have an unassigned primary it - * will anyhow wait to find an existing copy of the shard to be allocated - * Note: the other side of the equation is scheduling a reroute in a timely manner, which happens in the RoutingService - * - * PUBLIC FOR TESTS! - * - * @param unassignedIterator iterator over unassigned shards - * @param shard the shard which might be delayed - */ - public void ignoreUnassignedIfDelayed(RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator, ShardRouting shard, RoutingChangesObserver changes) { - if (shard.unassignedInfo().isDelayed()) { - logger.debug("{}: allocation of [{}] is delayed", shard.shardId(), shard); - unassignedIterator.removeAndIgnore(AllocationStatus.DELAYED_ALLOCATION, changes); + @Override + public UnassignedShardDecision makeAllocationDecision(final ShardRouting unassignedShard, + final RoutingAllocation allocation, + final Logger logger) { + if (isResponsibleFor(unassignedShard) == false) { + // this allocator is not responsible for deciding on this shard + return UnassignedShardDecision.DECISION_NOT_TAKEN; } + + final RoutingNodes routingNodes = allocation.routingNodes(); + final boolean explain = allocation.debugDecision(); + // pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing + Tuple> allocateDecision = canBeAllocatedToAtLeastOneNode(unassignedShard, allocation, explain); + if (allocateDecision.v1().type() != Decision.Type.YES) { + logger.trace("{}: ignoring allocation, can't be allocated on any node", unassignedShard); + return UnassignedShardDecision.noDecision(UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.v1()), + "all nodes returned a " + allocateDecision.v1().type() + " decision for allocating the replica shard", + allocateDecision.v2()); + } + + AsyncShardFetch.FetchResult shardStores = fetchData(unassignedShard, allocation); + if (shardStores.hasData() == false) { + logger.trace("{}: ignoring allocation, still fetching shard stores", unassignedShard); + allocation.setHasPendingAsyncFetch(); + return UnassignedShardDecision.noDecision(AllocationStatus.FETCHING_SHARD_DATA, + "still fetching shard state from the nodes in the cluster"); + } + + ShardRouting primaryShard = routingNodes.activePrimary(unassignedShard.shardId()); + assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary"; + TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore = findStore(primaryShard, allocation, shardStores); + if (primaryStore == null) { + // if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed) + // we want to let the replica be allocated in order to expose the actual problem with the primary that the replica + // will try and recover from + // Note, this is the existing behavior, as exposed in running CorruptFileTest#testNoPrimaryData + logger.trace("{}: 
no primary shard store found or allocated, letting actual allocation figure it out", unassignedShard); + return UnassignedShardDecision.DECISION_NOT_TAKEN; + } + + MatchingNodes matchingNodes = findMatchingNodes(unassignedShard, allocation, primaryStore, shardStores, explain); + assert explain == false || matchingNodes.nodeDecisions != null : "in explain mode, we must have individual node decisions"; + + if (matchingNodes.getNodeWithHighestMatch() != null) { + RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId()); + // we only check on THROTTLE since we checked before before on NO + Decision decision = allocation.deciders().canAllocate(unassignedShard, nodeWithHighestMatch, allocation); + if (decision.type() == Decision.Type.THROTTLE) { + logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store", + unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node()); + // we are throttling this, as we have enough other shards to allocate to this node, so ignore it for now + return UnassignedShardDecision.throttleDecision( + "returned a THROTTLE decision on each node that has an existing copy of the shard, so waiting to re-use one " + + "of those copies", matchingNodes.nodeDecisions); + } else { + logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", + unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node()); + // we found a match + return UnassignedShardDecision.yesDecision( + "allocating to node [" + nodeWithHighestMatch.nodeId() + "] in order to re-use its unallocated persistent store", + nodeWithHighestMatch.nodeId(), null, matchingNodes.nodeDecisions); + } + } else if (matchingNodes.hasAnyData() == false && unassignedShard.unassignedInfo().isDelayed()) { + // if we didn't manage to find *any* data (regardless of matching sizes), and the replica is + // unassigned due to a node leaving, so we delay allocation of this replica to see if the + // node with the shard copy will rejoin so we can re-use the copy it has + logger.debug("{}: allocation of [{}] is delayed", unassignedShard.shardId(), unassignedShard); + return UnassignedShardDecision.noDecision(AllocationStatus.DELAYED_ALLOCATION, + "not allocating this shard, no nodes contain data for the replica and allocation is delayed"); + } + + return UnassignedShardDecision.DECISION_NOT_TAKEN; } /** @@ -215,10 +218,15 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { * * Returns the best allocation decision for allocating the shard on any node (i.e. YES if at least one * node decided YES, THROTTLE if at least one node decided THROTTLE, and NO if none of the nodes decided - * YES or THROTTLE. + * YES or THROTTLE). If the explain flag is turned on AND the decision is NO or THROTTLE, then this method + * also returns a map of nodes to decisions (second value in the tuple) to use for explanations; if the explain + * flag is off, the second value in the return tuple will be null. 
*/ - private Decision canBeAllocatedToAtLeastOneNode(ShardRouting shard, RoutingAllocation allocation) { + private Tuple> canBeAllocatedToAtLeastOneNode(ShardRouting shard, + RoutingAllocation allocation, + boolean explain) { Decision madeDecision = Decision.NO; + Map nodeDecisions = new HashMap<>(); for (ObjectCursor cursor : allocation.nodes().getDataNodes().values()) { RoutingNode node = allocation.routingNodes().node(cursor.value.getId()); if (node == null) { @@ -227,13 +235,16 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { // if we can't allocate it on a node, ignore it, for example, this handles // cases for only allocating a replica after a primary Decision decision = allocation.deciders().canAllocate(shard, node, allocation); + if (explain) { + nodeDecisions.put(node.nodeId(), decision); + } if (decision.type() == Decision.Type.YES) { - return decision; + return Tuple.tuple(decision, null); } else if (madeDecision.type() == Decision.Type.NO && decision.type() == Decision.Type.THROTTLE) { madeDecision = decision; } } - return madeDecision; + return Tuple.tuple(madeDecision, explain ? nodeDecisions : null); } /** @@ -254,8 +265,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation, TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore, - AsyncShardFetch.FetchResult data) { + AsyncShardFetch.FetchResult data, + boolean explain) { ObjectLongMap nodesToSize = new ObjectLongHashMap<>(); + Map nodeDecisions = new HashMap<>(); for (Map.Entry nodeStoreEntry : data.getData().entrySet()) { DiscoveryNode discoNode = nodeStoreEntry.getKey(); TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData(); @@ -273,6 +286,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { // we only check for NO, since if this node is THROTTLING and it has enough "same data" // then we will try and assign it next time Decision decision = allocation.deciders().canAllocate(shard, node, allocation); + if (explain) { + nodeDecisions.put(node.nodeId(), decision); + } + if (decision.type() == Decision.Type.NO) { continue; } @@ -297,7 +314,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { } } - return new MatchingNodes(nodesToSize); + return new MatchingNodes(nodesToSize, explain ? nodeDecisions : null); } protected abstract AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation); @@ -305,9 +322,12 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { static class MatchingNodes { private final ObjectLongMap nodesToSize; private final DiscoveryNode nodeWithHighestMatch; + @Nullable + private final Map nodeDecisions; - public MatchingNodes(ObjectLongMap nodesToSize) { + public MatchingNodes(ObjectLongMap nodesToSize, @Nullable Map nodeDecisions) { this.nodesToSize = nodesToSize; + this.nodeDecisions = nodeDecisions; long highestMatchSize = 0; DiscoveryNode highestMatchNode = null; @@ -340,5 +360,13 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { public boolean hasAnyData() { return nodesToSize.isEmpty() == false; } + + /** + * The decisions map for all nodes with a shard copy, if available. 
+ */ + @Nullable + public Map getNodeDecisions() { + return nodeDecisions; + } } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecisionTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecisionTests.java new file mode 100644 index 00000000000..412cc3322f2 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UnassignedShardDecisionTests.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * Unit tests for the {@link UnassignedShardDecision} class. + */ +public class UnassignedShardDecisionTests extends ESTestCase { + + public void testDecisionNotTaken() { + UnassignedShardDecision unassignedShardDecision = UnassignedShardDecision.DECISION_NOT_TAKEN; + assertFalse(unassignedShardDecision.isDecisionTaken()); + assertNull(unassignedShardDecision.getFinalDecision()); + assertNull(unassignedShardDecision.getAllocationStatus()); + assertNull(unassignedShardDecision.getAllocationId()); + assertNull(unassignedShardDecision.getAssignedNodeId()); + assertNull(unassignedShardDecision.getFinalExplanation()); + assertNull(unassignedShardDecision.getNodeDecisions()); + expectThrows(IllegalArgumentException.class, () -> unassignedShardDecision.getFinalDecisionSafe()); + expectThrows(IllegalArgumentException.class, () -> unassignedShardDecision.getFinalExplanationSafe()); + } + + public void testNoDecision() { + final AllocationStatus allocationStatus = randomFrom( + AllocationStatus.DELAYED_ALLOCATION, AllocationStatus.NO_VALID_SHARD_COPY, AllocationStatus.FETCHING_SHARD_DATA + ); + UnassignedShardDecision noDecision = UnassignedShardDecision.noDecision(allocationStatus, "something is wrong"); + assertTrue(noDecision.isDecisionTaken()); + assertEquals(Decision.Type.NO, noDecision.getFinalDecision().type()); + assertEquals(allocationStatus, noDecision.getAllocationStatus()); + assertEquals("something is wrong", noDecision.getFinalExplanation()); + assertNull(noDecision.getNodeDecisions()); + assertNull(noDecision.getAssignedNodeId()); + assertNull(noDecision.getAllocationId()); + + Map nodeDecisions = new HashMap<>(); + nodeDecisions.put("node1", Decision.NO); + nodeDecisions.put("node2", Decision.NO); + noDecision = UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, "something is wrong", nodeDecisions); + assertTrue(noDecision.isDecisionTaken()); + assertEquals(Decision.Type.NO, 
noDecision.getFinalDecision().type()); + assertEquals(AllocationStatus.DECIDERS_NO, noDecision.getAllocationStatus()); + assertEquals("something is wrong", noDecision.getFinalExplanation()); + assertEquals(nodeDecisions, noDecision.getNodeDecisions()); + assertNull(noDecision.getAssignedNodeId()); + assertNull(noDecision.getAllocationId()); + + // test bad values + expectThrows(NullPointerException.class, () -> UnassignedShardDecision.noDecision(null, "a")); + expectThrows(NullPointerException.class, () -> UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, null)); + } + + public void testThrottleDecision() { + Map nodeDecisions = new HashMap<>(); + nodeDecisions.put("node1", Decision.NO); + nodeDecisions.put("node2", Decision.THROTTLE); + UnassignedShardDecision throttleDecision = UnassignedShardDecision.throttleDecision("too much happening", nodeDecisions); + assertTrue(throttleDecision.isDecisionTaken()); + assertEquals(Decision.Type.THROTTLE, throttleDecision.getFinalDecision().type()); + assertEquals(AllocationStatus.DECIDERS_THROTTLED, throttleDecision.getAllocationStatus()); + assertEquals("too much happening", throttleDecision.getFinalExplanation()); + assertEquals(nodeDecisions, throttleDecision.getNodeDecisions()); + assertNull(throttleDecision.getAssignedNodeId()); + assertNull(throttleDecision.getAllocationId()); + + // test bad values + expectThrows(NullPointerException.class, () -> UnassignedShardDecision.throttleDecision(null, Collections.emptyMap())); + } + + public void testYesDecision() { + Map nodeDecisions = new HashMap<>(); + nodeDecisions.put("node1", Decision.YES); + nodeDecisions.put("node2", Decision.NO); + String allocId = randomBoolean() ? "allocId" : null; + UnassignedShardDecision yesDecision = UnassignedShardDecision.yesDecision( + "node was very kind", "node1", allocId, nodeDecisions + ); + assertTrue(yesDecision.isDecisionTaken()); + assertEquals(Decision.Type.YES, yesDecision.getFinalDecision().type()); + assertNull(yesDecision.getAllocationStatus()); + assertEquals("node was very kind", yesDecision.getFinalExplanation()); + assertEquals(nodeDecisions, yesDecision.getNodeDecisions()); + assertEquals("node1", yesDecision.getAssignedNodeId()); + assertEquals(allocId, yesDecision.getAllocationId()); + + expectThrows(NullPointerException.class, + () -> UnassignedShardDecision.yesDecision(null, "a", randomBoolean() ? 
"a" : null, Collections.emptyMap())); + expectThrows(NullPointerException.class, + () -> UnassignedShardDecision.yesDecision("a", null, null, Collections.emptyMap())); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index a1efa69f775..f6b9a0a19ed 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -38,10 +38,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationD import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; -import org.elasticsearch.gateway.AsyncShardFetch; import org.elasticsearch.gateway.GatewayAllocator; -import org.elasticsearch.gateway.ReplicaShardAllocator; -import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.gateway.NoopGatewayAllocator; @@ -209,14 +206,6 @@ public abstract class ESAllocationTestCase extends ESTestCase { * Mocks behavior in ReplicaShardAllocator to remove delayed shards from list of unassigned shards so they don't get reassigned yet. */ protected static class DelayedShardsMockGatewayAllocator extends GatewayAllocator { - private final ReplicaShardAllocator replicaShardAllocator = new ReplicaShardAllocator(Settings.EMPTY) { - @Override - protected AsyncShardFetch.FetchResult - fetchData(ShardRouting shard, RoutingAllocation allocation) { - return new AsyncShardFetch.FetchResult<>(shard.shardId(), null, Collections.emptySet(), Collections.emptySet()); - } - }; - public DelayedShardsMockGatewayAllocator() { super(Settings.EMPTY, null, null); @@ -236,7 +225,9 @@ public abstract class ESAllocationTestCase extends ESTestCase { if (shard.primary() || shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) { continue; } - replicaShardAllocator.ignoreUnassignedIfDelayed(unassignedIterator, shard, allocation.changes()); + if (shard.unassignedInfo().isDelayed()) { + unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.DELAYED_ALLOCATION, allocation.changes()); + } } } } From 7f92971f261029aa85f8933116d7dd1caea43538 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Mon, 12 Sep 2016 22:29:42 +0200 Subject: [PATCH 10/18] remove assumeX methods from IndexShardTests The cause early termination of tests, which means we don't clean up and close shards, but also don't cause a failure. This in turns makes TestRuleTemporaryFilesCleanup fail on windows (because it does try to clean up, but the files are referenced). 
Getting stuff like: ``` > C:\jenkins\workspace\es_core_master_windows-2012-r2\core\build\testrun\test\J3\temp\org.elasticsearch.index.shard.IndexShardTests_68B5E1103D78A58B-001\tempDir-006\indices\_na_\0\translog\translog-1.tlog: java.nio.file.AccessDeniedException: C:\jenkins\workspace\es_core_master_windows-2012-r2\core\build\testrun\test\J3\temp\org.elasticsearch.index.shard.IndexShardTests_68B5E1103D78A58B-001\tempDir-006\indices\_na_\0\translog\translog-1.tlog ``` --- .../org/elasticsearch/index/shard/IndexShardTests.java | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index c0375b2f98b..269694ed5ef 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -525,9 +525,11 @@ public class IndexShardTests extends IndexShardTestCase { expectedSubSequence.append("\",\"data_path\":\""); expectedSubSequence.append(shard.shardPath().getRootDataPath().toString()); expectedSubSequence.append("\",\"is_custom_data_path\":").append(shard.shardPath().isCustomDataPath()).append("}"); - assumeFalse("Some path weirdness on windows", Constants.WINDOWS); - assertTrue(xContent.contains(expectedSubSequence)); - + if (Constants.WINDOWS) { + // Some path weirdness on windows + } else { + assertTrue(xContent.contains(expectedSubSequence)); + } closeShards(shard); } @@ -1014,7 +1016,7 @@ public class IndexShardTests extends IndexShardTestCase { assertThat(before.getMemorySizeInBytes(), equalTo(0L)); FieldDataStats after = null; try (Engine.Searcher searcher = shard.acquireSearcher("test")) { - assumeTrue("we have to have more than one segment", searcher.getDirectoryReader().leaves().size() > 1); + assertThat("we have to have more than one segment", searcher.getDirectoryReader().leaves().size(), greaterThan(1)); ifd.loadGlobal(searcher.getDirectoryReader()); after = shard.fieldData().stats("foo"); assertEquals(after.getEvictions(), before.getEvictions()); From 686994ae2dbb33b1f36bdde3a63162d6c46eafbc Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 12 Sep 2016 22:42:55 +0200 Subject: [PATCH 11/18] Deguice SearchService and friends (#20423) This change removes the guice dependency handling for SearchService and several related classes like SearchTransportService and SearchPhaseController. The latter two now have package-private constructors, and dependencies like FetchPhase are now created by calling their constructors explicitly. This also cleans up several users of the DefaultSearchContext and centralizes its creation inside SearchService.
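For readers unfamiliar with the guice setup being removed: the practical effect is that dependencies arrive through plain constructor parameters and the node-level code wires the object graph by hand, which is also what allows constructors such as SearchPhaseController's to become package-private. A toy sketch of the idea, with invented stand-in classes rather than the actual Elasticsearch wiring:

```java
// Toy illustration only; class names mimic the patch but are stand-ins.
final class FetchPhase { }

final class SearchPhaseController {
    SearchPhaseController() { } // package-private: built by the owner, not by guice
}

final class SearchService {
    private final FetchPhase fetchPhase;
    private final SearchPhaseController controller;

    // dependencies are explicit constructor parameters instead of @Inject fields
    SearchService(FetchPhase fetchPhase, SearchPhaseController controller) {
        this.fetchPhase = fetchPhase;
        this.controller = controller;
    }
}

final class NodeWiring {
    public static void main(String[] args) {
        // explicit construction replaces the former guice module bindings
        SearchService searchService = new SearchService(new FetchPhase(), new SearchPhaseController());
        System.out.println("wired: " + searchService);
    }
}
```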
--- .../query/TransportValidateQueryAction.java | 64 ++++++------------- .../explain/TransportExplainAction.java | 55 +++++----------- .../elasticsearch/action/get/GetRequest.java | 12 ---- .../action/get/GetRequestBuilder.java | 5 -- .../action/get/MultiGetRequest.java | 9 --- .../action/get/MultiGetRequestBuilder.java | 5 -- .../action/get/MultiGetShardRequest.java | 14 ---- .../action/get/TransportGetAction.java | 2 +- .../get/TransportShardMultiGetAction.java | 6 +- .../search/AbstractSearchAsyncAction.java | 2 - .../SearchDfsQueryAndFetchAsyncAction.java | 2 - .../SearchDfsQueryThenFetchAsyncAction.java | 2 - .../search}/SearchPhaseController.java | 5 +- .../SearchQueryAndFetchAsyncAction.java | 2 - .../SearchQueryThenFetchAsyncAction.java | 2 - .../SearchScrollQueryAndFetchAsyncAction.java | 2 - ...SearchScrollQueryThenFetchAsyncAction.java | 2 - .../search}/SearchTransportService.java | 7 +- .../search/TransportClearScrollAction.java | 6 +- .../action/search/TransportSearchAction.java | 13 ++-- .../search/TransportSearchScrollAction.java | 15 +++-- .../action/update/UpdateHelper.java | 2 +- .../index/get/ShardGetService.java | 6 +- .../java/org/elasticsearch/node/Node.java | 20 +++--- .../rest/action/document/RestGetAction.java | 1 - .../action/document/RestMultiGetAction.java | 1 - .../{internal => }/DefaultSearchContext.java | 50 ++++++--------- .../elasticsearch/search/SearchModule.java | 14 ++-- .../elasticsearch/search/SearchService.java | 32 +++++----- .../internal/FilteredSearchContext.java | 4 +- .../search/internal/SearchContext.java | 3 +- .../internal/ShardSearchLocalRequest.java | 7 +- .../search/internal/SubSearchContext.java | 2 +- .../search/query/QueryPhase.java | 2 +- .../action/IndicesRequestIT.java | 2 +- .../action/get/MultiGetShardRequestTests.java | 3 - .../search}/SearchPhaseControllerTests.java | 3 +- .../org/elasticsearch/get/GetActionIT.java | 40 +++++------- .../DefaultSearchContextTests.java | 3 +- .../search/SearchRequestTests.java | 7 +- .../aggregations/AggregatorParsingTests.java | 7 +- .../aggregations/BaseAggregationTestCase.java | 7 +- .../builder/SearchSourceBuilderTests.java | 7 +- .../ShardSearchTransportRequestTests.java | 7 +- .../java/org/elasticsearch/node/MockNode.java | 17 +++-- .../search/MockSearchService.java | 8 +-- .../test/AbstractQueryTestCase.java | 7 +- .../elasticsearch/test/TestSearchContext.java | 2 +- 48 files changed, 167 insertions(+), 329 deletions(-) rename core/src/main/java/org/elasticsearch/{search/controller => action/search}/SearchPhaseController.java (99%) rename core/src/main/java/org/elasticsearch/{search/action => action/search}/SearchTransportService.java (98%) rename core/src/main/java/org/elasticsearch/search/{internal => }/DefaultSearchContext.java (95%) rename core/src/test/java/org/elasticsearch/{search/controller => action/search}/SearchPhaseControllerTests.java (99%) rename core/src/test/java/org/elasticsearch/search/{internal => }/DefaultSearchContextTests.java (97%) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index d1405e92e1c..718d3b25e69 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -38,17 +38,11 @@ import 
org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchService; -import org.elasticsearch.search.fetch.FetchPhase; -import org.elasticsearch.search.internal.DefaultSearchContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.tasks.Task; @@ -67,25 +61,15 @@ import java.util.concurrent.atomic.AtomicReferenceArray; */ public class TransportValidateQueryAction extends TransportBroadcastAction { - private final IndicesService indicesService; - - private final ScriptService scriptService; - - private final BigArrays bigArrays; - - private final FetchPhase fetchPhase; + private final SearchService searchService; @Inject public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, IndicesService indicesService, ScriptService scriptService, - BigArrays bigArrays, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, FetchPhase fetchPhase) { + TransportService transportService, SearchService searchService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, ValidateQueryAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, ValidateQueryRequest::new, ShardValidateQueryRequest::new, ThreadPool.Names.SEARCH); - this.indicesService = indicesService; - this.scriptService = scriptService; - this.bigArrays = bigArrays; - this.fetchPhase = fetchPhase; + this.searchService = searchService; } @Override @@ -161,29 +145,20 @@ public class TransportValidateQueryAction extends TransportBroadcastAction SearchContext.removeCurrent()); } return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error); } - private String getRewrittenQuery(IndexSearcher searcher, Query query) throws IOException { - Query queryRewrite = searcher.rewrite(query); - if (queryRewrite instanceof MatchNoDocsQuery) { - return query.toString(); + private String explain(SearchContext context, boolean rewritten) throws IOException { + Query query = context.query(); + if (rewritten && query instanceof MatchNoDocsQuery) { + return context.parsedQuery().query().toString(); } else { - return queryRewrite.toString(); + return query.toString(); } } } diff --git a/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index 95177853d41..1ce2eafc843 100644 --- a/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -31,20 +31,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import 
org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.UidFieldMapper; -import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchService; -import org.elasticsearch.search.fetch.FetchPhase; -import org.elasticsearch.search.internal.DefaultSearchContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.search.rescore.RescoreSearchContext; @@ -60,26 +54,15 @@ import java.io.IOException; // TODO: AggregatedDfs. Currently the idf can be different then when executing a normal search with explain. public class TransportExplainAction extends TransportSingleShardAction { - private final IndicesService indicesService; - - private final ScriptService scriptService; - - - private final BigArrays bigArrays; - - private final FetchPhase fetchPhase; + private final SearchService searchService; @Inject public TransportExplainAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, IndicesService indicesService, ScriptService scriptService, - BigArrays bigArrays, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - FetchPhase fetchPhase) { + TransportService transportService, SearchService searchService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, ExplainAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, ExplainRequest::new, ThreadPool.Names.GET); - this.indicesService = indicesService; - this.scriptService = scriptService; - this.bigArrays = bigArrays; - this.fetchPhase = fetchPhase; + this.searchService = searchService; } @Override @@ -104,23 +87,19 @@ public class TransportExplainAction extends TransportSingleShardAction SearchContext.removeCurrent()); } } diff --git a/core/src/main/java/org/elasticsearch/action/get/GetRequest.java b/core/src/main/java/org/elasticsearch/action/get/GetRequest.java index 42c4ccc701d..76de3bbd4b6 100644 --- a/core/src/main/java/org/elasticsearch/action/get/GetRequest.java +++ b/core/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -61,7 +61,6 @@ public class GetRequest extends SingleShardRequest implements Realti private VersionType versionType = VersionType.INTERNAL; private long version = Versions.MATCH_ANY; - private boolean ignoreErrorsOnGeneratedFields; public GetRequest() { type = "_all"; @@ -248,19 +247,10 @@ public class GetRequest extends SingleShardRequest implements Realti return this; } - public GetRequest ignoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) { - this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields; - return this; - } - public VersionType versionType() { return this.versionType; } - public boolean ignoreErrorsOnGeneratedFields() { - return ignoreErrorsOnGeneratedFields; - 
} - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -278,7 +268,6 @@ public class GetRequest extends SingleShardRequest implements Realti } } realtime = in.readBoolean(); - this.ignoreErrorsOnGeneratedFields = in.readBoolean(); this.versionType = VersionType.fromValue(in.readByte()); this.version = in.readLong(); @@ -304,7 +293,6 @@ public class GetRequest extends SingleShardRequest implements Realti } } out.writeBoolean(realtime); - out.writeBoolean(ignoreErrorsOnGeneratedFields); out.writeByte(versionType.getValue()); out.writeLong(version); out.writeOptionalStreamable(fetchSourceContext); diff --git a/core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java index 7827de12eac..a3f070fd2ef 100644 --- a/core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java @@ -155,11 +155,6 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder implements I String preference; boolean realtime = true; boolean refresh; - public boolean ignoreErrorsOnGeneratedFields = false; - List items = new ArrayList<>(); public List getItems() { @@ -338,11 +336,6 @@ public class MultiGetRequest extends ActionRequest implements I } - public MultiGetRequest ignoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) { - this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields; - return this; - } - public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, byte[] data, int from, int length) throws Exception { return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, new BytesArray(data, from, length), true); } @@ -510,7 +503,6 @@ public class MultiGetRequest extends ActionRequest implements I preference = in.readOptionalString(); refresh = in.readBoolean(); realtime = in.readBoolean(); - ignoreErrorsOnGeneratedFields = in.readBoolean(); int size = in.readVInt(); items = new ArrayList<>(size); @@ -525,7 +517,6 @@ public class MultiGetRequest extends ActionRequest implements I out.writeOptionalString(preference); out.writeBoolean(refresh); out.writeBoolean(realtime); - out.writeBoolean(ignoreErrorsOnGeneratedFields); out.writeVInt(items.size()); for (Item item : items) { diff --git a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java index 6e32e1caf30..a2cb204d5ea 100644 --- a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java @@ -80,9 +80,4 @@ public class MultiGetRequestBuilder extends ActionRequestBuilder items; @@ -52,7 +51,6 @@ public class MultiGetShardRequest extends SingleShardRequest) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e); + logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, + item.type(), item.id()), e); response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e)); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java 
b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index d86134c7d44..6cb68b8e9be 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -40,8 +40,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.action.SearchTransportService; -import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java index 367832afab3..ba73b0f4bea 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java @@ -28,8 +28,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.search.action.SearchTransportService; -import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.QueryFetchSearchResult; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 7ceefb1998c..ccd646ae129 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -31,8 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.action.SearchTransportService; -import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult; diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java similarity index 99% rename from core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java rename to core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 1766d41dde3..7306e645e0d 100644 --- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.controller; +package org.elasticsearch.action.search; import com.carrotsearch.hppc.IntArrayList; import com.carrotsearch.hppc.ObjectObjectHashMap; @@ -89,8 +89,7 @@ public class SearchPhaseController extends AbstractComponent { private final ScriptService scriptService; private final ClusterService clusterService; - @Inject - public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) { + SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) { super(settings); this.bigArrays = bigArrays; this.scriptService = scriptService; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java index 2e13a0d26e8..d799bc26764 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java @@ -25,8 +25,6 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.search.action.SearchTransportService; -import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.fetch.QueryFetchSearchResult; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 3987b48c561..6df2bb3f87e 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -31,8 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.action.SearchTransportService; -import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.InternalSearchResponse; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java index 24e497954a7..2bdf7dc30f9 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java @@ -28,8 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.search.action.SearchTransportService; -import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.fetch.QueryFetchSearchResult; import 
org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult; import org.elasticsearch.search.internal.InternalScrollSearchRequest; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index 21f1c4ce68a..4024d3b5f39 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -29,8 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.search.action.SearchTransportService; -import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchRequest; import org.elasticsearch.search.internal.InternalScrollSearchRequest; diff --git a/core/src/main/java/org/elasticsearch/search/action/SearchTransportService.java b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java similarity index 98% rename from core/src/main/java/org/elasticsearch/search/action/SearchTransportService.java rename to core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 8552d21b5c3..985cc854f05 100644 --- a/core/src/main/java/org/elasticsearch/search/action/SearchTransportService.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -17,17 +17,15 @@ * under the License. */ -package org.elasticsearch.search.action; +package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -75,8 +73,7 @@ public class SearchTransportService extends AbstractComponent { private final TransportService transportService; private final SearchService searchService; - @Inject - public SearchTransportService(Settings settings, TransportService transportService, SearchService searchService) { + SearchTransportService(Settings settings, TransportService transportService, SearchService searchService) { super(settings); this.transportService = transportService; this.searchService = searchService; diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java index 220e7f5b250..cb7e7531d2d 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java @@ -32,7 +32,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.CountDown; -import org.elasticsearch.search.action.SearchTransportService; +import org.elasticsearch.search.SearchService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; @@ -53,11 +53,11 @@ public class TransportClearScrollAction extends HandledTransportAction searchServiceImpl = pickSearchServiceImplementation(); - if (searchServiceImpl == SearchService.class) { - b.bind(SearchService.class).asEagerSingleton(); - } else { - b.bind(SearchService.class).to(searchServiceImpl).asEagerSingleton(); - } + b.bind(SearchService.class).toInstance(newSearchService(clusterService, indicesService, + threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase())); pluginComponents.stream().forEach(p -> b.bind((Class) p.getClass()).toInstance(p)); } @@ -793,10 +787,12 @@ public class Node implements Closeable { } /** - * Select the search service implementation. Overrided by tests. + * Creates a new SearchService. This method can be overridden by tests to inject mock implementations. */ - protected Class pickSearchServiceImplementation() { - return SearchService.class; + protected SearchService newSearchService(ClusterService clusterService, IndicesService indicesService, + ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, + FetchPhase fetchPhase) { + return new SearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase); } /** diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java index 8c782a8d128..f127a58579e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java @@ -58,7 +58,6 @@ public class RestGetAction extends BaseRestHandler { getRequest.parent(request.param("parent")); getRequest.preference(request.param("preference")); getRequest.realtime(request.paramAsBoolean("realtime", getRequest.realtime())); - getRequest.ignoreErrorsOnGeneratedFields(request.paramAsBoolean("ignore_errors_on_generated_fields", false)); String sField = request.param("fields"); if (sField != null) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java index 995c43059da..50bd4c37ac7 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestMultiGetAction.java @@ -59,7 +59,6 @@ public class RestMultiGetAction extends BaseRestHandler { multiGetRequest.refresh(request.paramAsBoolean("refresh", multiGetRequest.refresh())); multiGetRequest.preference(request.param("preference")); multiGetRequest.realtime(request.paramAsBoolean("realtime", multiGetRequest.realtime())); - multiGetRequest.ignoreErrorsOnGeneratedFields(request.paramAsBoolean("ignore_errors_on_generated_fields", false)); String[] sFields = null; String sField = request.param("fields"); diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java similarity index 95% rename from
core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java rename to core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index b1618c3a205..20789e015a9 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.internal; +package org.elasticsearch.search; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.BooleanClause.Occur; @@ -53,8 +53,6 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchExtBuilder; -import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.SearchContextAggregations; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchPhase; @@ -64,6 +62,10 @@ import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext; import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight; +import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.search.internal.ScrollContext; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QueryPhaseExecutionException; @@ -80,7 +82,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -public class DefaultSearchContext extends SearchContext { +final class DefaultSearchContext extends SearchContext { private final long id; private final ShardSearchRequest request; @@ -123,10 +125,7 @@ public class DefaultSearchContext extends SearchContext { * things like the type filter or alias filters. */ private ParsedQuery originalQuery; - /** - * Just like originalQuery but with the filters from types, aliases and slice applied. - */ - private ParsedQuery filteredQuery; + /** * The query to actually execute. */ @@ -151,7 +150,7 @@ public class DefaultSearchContext extends SearchContext { private final QueryShardContext queryShardContext; private FetchPhase fetchPhase; - public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher, + DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher, IndexService indexService, IndexShard indexShard, ScriptService scriptService, BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher, TimeValue timeout, FetchPhase fetchPhase) { @@ -187,7 +186,7 @@ public class DefaultSearchContext extends SearchContext { * Should be called before executing the main query and after all other parameters have been set. 
*/ @Override - public void preProcess() { + public void preProcess(boolean rewrite) { if (hasOnlySuggest() ) { return; } @@ -241,20 +240,22 @@ public class DefaultSearchContext extends SearchContext { if (queryBoost() != AbstractQueryBuilder.DEFAULT_BOOST) { parsedQuery(new ParsedQuery(new FunctionScoreQuery(query(), new WeightFactorFunction(queryBoost)), parsedQuery())); } - filteredQuery(buildFilteredQuery()); - try { - this.query = searcher().rewrite(this.query); - } catch (IOException e) { - throw new QueryPhaseExecutionException(this, "Failed to rewrite main query", e); + this.query = buildFilteredQuery(); + if (rewrite) { + try { + this.query = searcher.rewrite(query); + } catch (IOException e) { + throw new QueryPhaseExecutionException(this, "Failed to rewrite main query", e); + } } } - private ParsedQuery buildFilteredQuery() { - Query searchFilter = searchFilter(queryShardContext.getTypes()); + private Query buildFilteredQuery() { + final Query searchFilter = searchFilter(queryShardContext.getTypes()); if (searchFilter == null) { - return originalQuery; + return originalQuery.query(); } - Query result; + final Query result; if (Queries.isConstantMatchAllQuery(query())) { result = new ConstantScoreQuery(searchFilter); } else { @@ -263,7 +264,7 @@ public class DefaultSearchContext extends SearchContext { .add(searchFilter, Occur.FILTER) .build(); } - return new ParsedQuery(result, originalQuery); + return result; } @Override @@ -618,15 +619,6 @@ public class DefaultSearchContext extends SearchContext { return this; } - public ParsedQuery filteredQuery() { - return filteredQuery; - } - - private void filteredQuery(ParsedQuery filteredQuery) { - this.filteredQuery = filteredQuery; - this.query = filteredQuery.query(); - } - @Override public ParsedQuery parsedQuery() { return this.originalQuery; diff --git a/core/src/main/java/org/elasticsearch/search/SearchModule.java b/core/src/main/java/org/elasticsearch/search/SearchModule.java index 0a1d9824ea9..ec9f33e6c19 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/core/src/main/java/org/elasticsearch/search/SearchModule.java @@ -95,7 +95,6 @@ import org.elasticsearch.plugins.SearchPlugin.QuerySpec; import org.elasticsearch.plugins.SearchPlugin.ScoreFunctionSpec; import org.elasticsearch.plugins.SearchPlugin.SearchExtSpec; import org.elasticsearch.plugins.SearchPlugin.SearchExtensionSpec; -import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorParsers; @@ -243,7 +242,6 @@ import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel; import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel; import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregator; -import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase; @@ -384,7 +382,6 @@ public class SearchModule extends AbstractModule { bind(IndicesQueriesRegistry.class).toInstance(queryParserRegistry); bind(SearchRequestParsers.class).toInstance(searchRequestParsers); 
bind(SearchExtRegistry.class).toInstance(searchExtParserRegistry); - configureSearch(); } } @@ -574,13 +571,6 @@ public class SearchModule extends AbstractModule { } } - protected void configureSearch() { - // configure search private classes... - bind(SearchPhaseController.class).asEagerSingleton(); - bind(FetchPhase.class).toInstance(new FetchPhase(fetchSubPhases)); - bind(SearchTransportService.class).asEagerSingleton(); - } - private void registerShapes() { if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { ShapeBuilders.register(namedWriteables); @@ -817,4 +807,8 @@ public class SearchModule extends AbstractModule { queryParserRegistry.register(spec.getParser(), spec.getName()); namedWriteables.add(new Entry(QueryBuilder.class, spec.getName().getPreferredName(), spec.getReader())); } + + public FetchPhase getFetchPhase() { + return new FetchPhase(fetchSubPhases); + } } diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 2fb87565aa3..4334c5cf541 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -29,9 +29,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -66,7 +64,6 @@ import org.elasticsearch.search.fetch.ShardFetchRequest; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext.ScriptField; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; -import org.elasticsearch.search.internal.DefaultSearchContext; import org.elasticsearch.search.internal.InternalScrollSearchRequest; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; @@ -141,10 +138,9 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private final ParseFieldMatcher parseFieldMatcher; - @Inject - public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, + public SearchService(ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, FetchPhase fetchPhase) { - super(settings); + super(clusterService.getSettings()); this.parseFieldMatcher = new ParseFieldMatcher(settings); this.threadPool = threadPool; this.clusterService = clusterService; @@ -160,7 +156,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval, Names.SAME); defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout); + clusterService.getClusterSettings().addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout); } private void 
setDefaultSearchTimeout(TimeValue defaultSearchTimeout) { @@ -520,16 +516,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().getId()); - SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId()); - Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher; - - DefaultSearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, - indexService, - indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, - defaultSearchTimeout, fetchPhase); + DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, searcher); SearchContext.setCurrent(context); try { request.rewrite(context.getQueryShardContext()); @@ -572,6 +560,18 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv return context; } + public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) { + IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(request.shardId().getId()); + SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId()); + Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher; + + return new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, + indexService, + indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, + timeout, fetchPhase); + } + private void freeAllContextForIndex(Index index) { assert index != null; for (SearchContext ctx : activeContexts.values()) { diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 18af4873db3..04156bfac2d 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -100,8 +100,8 @@ public abstract class FilteredSearchContext extends SearchContext { } @Override - public void preProcess() { - in.preProcess(); + public void preProcess(boolean rewrite) { + in.preProcess(rewrite); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java index e96c9856a33..459df14a357 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -139,8 +139,9 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas /** * Should be called before executing the main query and after all other parameters have been set. 
+ * @param rewrite if the set query should be rewritten against the searcher returned from {@link #searcher()} */ - public abstract void preProcess(); + public abstract void preProcess(boolean rewrite); public abstract Query searchFilter(String[] types); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index d025d573c14..0d6148011ed 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -81,14 +81,11 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { this.nowInMillis = nowInMillis; } - public ShardSearchLocalRequest(String[] types, long nowInMillis) { + public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, String[] filteringAliases) { this.types = types; this.nowInMillis = nowInMillis; - } - - public ShardSearchLocalRequest(String[] types, long nowInMillis, String[] filteringAliases) { - this(types, nowInMillis); this.filteringAliases = filteringAliases; + this.shardId = shardId; } public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types, diff --git a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index dc385a0e120..2eb2d34dd2c 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -77,7 +77,7 @@ public class SubSearchContext extends FilteredSearchContext { } @Override - public void preProcess() { + public void preProcess(boolean rewrite) { } @Override diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 47fa98856cf..d1e90b2e9a5 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -85,7 +85,7 @@ public class QueryPhase implements SearchPhase { @Override public void preProcess(SearchContext context) { - context.preProcess(); + context.preProcess(true); } @Override diff --git a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java index 1a11e3f4803..5b901536471 100644 --- a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java @@ -86,7 +86,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.action.SearchTransportService; +import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; diff --git a/core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java b/core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java index dab737cf7f5..eed5f85c4a4 100644 --- a/core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java +++ 
b/core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java @@ -42,8 +42,6 @@ public class MultiGetShardRequestTests extends ESTestCase { if (randomBoolean()) { multiGetRequest.refresh(true); } - multiGetRequest.ignoreErrorsOnGeneratedFields(randomBoolean()); - MultiGetShardRequest multiGetShardRequest = new MultiGetShardRequest(multiGetRequest, "index", 0); int numItems = iterations(10, 30); for (int i = 0; i < numItems; i++) { @@ -79,7 +77,6 @@ public class MultiGetShardRequestTests extends ESTestCase { assertThat(multiGetShardRequest2.preference(), equalTo(multiGetShardRequest.preference())); assertThat(multiGetShardRequest2.realtime(), equalTo(multiGetShardRequest.realtime())); assertThat(multiGetShardRequest2.refresh(), equalTo(multiGetShardRequest.refresh())); - assertThat(multiGetShardRequest2.ignoreErrorsOnGeneratedFields(), equalTo(multiGetShardRequest.ignoreErrorsOnGeneratedFields())); assertThat(multiGetShardRequest2.items.size(), equalTo(multiGetShardRequest.items.size())); for (int i = 0; i < multiGetShardRequest2.items.size(); i++) { MultiGetRequest.Item item = multiGetShardRequest.items.get(i); diff --git a/core/src/test/java/org/elasticsearch/search/controller/SearchPhaseControllerTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java similarity index 99% rename from core/src/test/java/org/elasticsearch/search/controller/SearchPhaseControllerTests.java rename to core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index 33ef23db6ed..2778a9dbf47 100644 --- a/core/src/test/java/org/elasticsearch/search/controller/SearchPhaseControllerTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -17,10 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.search.controller; +package org.elasticsearch.action.search; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; +import org.elasticsearch.action.search.SearchPhaseController; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.util.BigArrays; diff --git a/core/src/test/java/org/elasticsearch/get/GetActionIT.java b/core/src/test/java/org/elasticsearch/get/GetActionIT.java index 25c0e616090..052fa91d876 100644 --- a/core/src/test/java/org/elasticsearch/get/GetActionIT.java +++ b/core/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -930,36 +930,30 @@ public class GetActionIT extends ESIntegTestCase { private void assertGetFieldsAlwaysWorks(String index, String type, String docId, String[] fields, @Nullable String routing) { for (String field : fields) { - assertGetFieldWorks(index, type, docId, field, false, routing); - assertGetFieldWorks(index, type, docId, field, true, routing); + assertGetFieldWorks(index, type, docId, field, routing); + assertGetFieldWorks(index, type, docId, field, routing); } } - private void assertGetFieldWorks(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) { - GetResponse response = getDocument(index, type, docId, field, ignoreErrors, routing); + private void assertGetFieldWorks(String index, String type, String docId, String field, @Nullable String routing) { + GetResponse response = getDocument(index, type, docId, field, routing); assertThat(response.getId(), equalTo(docId)); assertTrue(response.isExists()); assertNotNull(response.getField(field)); - response = multiGetDocument(index, type, docId, field, ignoreErrors, routing); + response = multiGetDocument(index, type, docId, field, routing); assertThat(response.getId(), equalTo(docId)); assertTrue(response.isExists()); assertNotNull(response.getField(field)); } - protected void assertGetFieldsException(String index, String type, String docId, String[] fields) { - for (String field : fields) { - assertGetFieldException(index, type, docId, field); - } - } - private void assertGetFieldException(String index, String type, String docId, String field) { try { - client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).setIgnoreErrorsOnGeneratedFields(false).get(); + client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).get(); fail(); } catch (ElasticsearchException e) { assertTrue(e.getMessage().contains("You can only get this field after refresh() has been called.")); } - MultiGetResponse multiGetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item(index, type, docId).fields(field)).setIgnoreErrorsOnGeneratedFields(false).get(); + MultiGetResponse multiGetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item(index, type, docId).fields(field)).get(); assertNull(multiGetResponse.getResponses()[0].getResponse()); assertTrue(multiGetResponse.getResponses()[0].getFailure().getMessage().contains("You can only get this field after refresh() has been called.")); } @@ -970,7 +964,7 @@ public class GetActionIT extends ESIntegTestCase { protected void assertGetFieldsNull(String index, String type, String docId, String[] fields, @Nullable String routing) { for (String field : fields) { - assertGetFieldNull(index, type, docId, field, true, routing); + assertGetFieldNull(index, type, docId, field, routing); } } @@ -980,37 +974,37 @@ public class GetActionIT 
extends ESIntegTestCase { protected void assertGetFieldsAlwaysNull(String index, String type, String docId, String[] fields, @Nullable String routing) { for (String field : fields) { - assertGetFieldNull(index, type, docId, field, true, routing); - assertGetFieldNull(index, type, docId, field, false, routing); + assertGetFieldNull(index, type, docId, field, routing); + assertGetFieldNull(index, type, docId, field, routing); } } - protected void assertGetFieldNull(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) { + protected void assertGetFieldNull(String index, String type, String docId, String field, @Nullable String routing) { //for get - GetResponse response = getDocument(index, type, docId, field, ignoreErrors, routing); + GetResponse response = getDocument(index, type, docId, field, routing); assertTrue(response.isExists()); assertNull(response.getField(field)); assertThat(response.getId(), equalTo(docId)); //same for multi get - response = multiGetDocument(index, type, docId, field, ignoreErrors, routing); + response = multiGetDocument(index, type, docId, field, routing); assertNull(response.getField(field)); assertThat(response.getId(), equalTo(docId)); assertTrue(response.isExists()); } - private GetResponse multiGetDocument(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) { + private GetResponse multiGetDocument(String index, String type, String docId, String field, @Nullable String routing) { MultiGetRequest.Item getItem = new MultiGetRequest.Item(index, type, docId).fields(field); if (routing != null) { getItem.routing(routing); } - MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet().add(getItem).setIgnoreErrorsOnGeneratedFields(ignoreErrors); + MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet().add(getItem); MultiGetResponse multiGetResponse = multiGetRequestBuilder.get(); assertThat(multiGetResponse.getResponses().length, equalTo(1)); return multiGetResponse.getResponses()[0].getResponse(); } - private GetResponse getDocument(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) { - GetRequestBuilder getRequestBuilder = client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).setIgnoreErrorsOnGeneratedFields(ignoreErrors); + private GetResponse getDocument(String index, String type, String docId, String field, @Nullable String routing) { + GetRequestBuilder getRequestBuilder = client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field); if (routing != null) { getRequestBuilder.setRouting(routing); } diff --git a/core/src/test/java/org/elasticsearch/search/internal/DefaultSearchContextTests.java b/core/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java similarity index 97% rename from core/src/test/java/org/elasticsearch/search/internal/DefaultSearchContextTests.java rename to core/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java index 9ab5af9f393..e546130b2e5 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/DefaultSearchContextTests.java +++ b/core/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.search.internal; +package org.elasticsearch.search; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.BooleanQuery; @@ -26,6 +26,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.TypeFieldMapper; +import org.elasticsearch.search.DefaultSearchContext; import org.elasticsearch.test.ESTestCase; import static org.apache.lucene.search.BooleanClause.Occur.FILTER; diff --git a/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java b/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java index 548a09d37bc..2c7ae356bf8 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchRequestTests.java @@ -56,12 +56,7 @@ public class SearchRequestTests extends ESTestCase { } }; SearchModule searchModule = new SearchModule(Settings.EMPTY, false, - Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())) { - @Override - protected void configureSearch() { - // Skip me - } - }; + Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())); List entries = new ArrayList<>(); entries.addAll(indicesModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables()); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java index a7476381b8e..b23bce86b4e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorParsingTests.java @@ -119,12 +119,7 @@ public class AggregatorParsingTests extends ESTestCase { bindMapperExtension(); } }; - SearchModule searchModule = new SearchModule(settings, false, emptyList()) { - @Override - protected void configureSearch() { - // Skip me - } - }; + SearchModule searchModule = new SearchModule(settings, false, emptyList()); List entries = new ArrayList<>(); entries.addAll(indicesModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables()); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java index b9c0e5f09c1..2cc1ca04f4d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java @@ -143,12 +143,7 @@ public abstract class BaseAggregationTestCase entries = new ArrayList<>(); entries.addAll(indicesModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables()); diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 7960e9bf4e8..0ecb50d584e 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -133,12 +133,7 @@ public class SearchSourceBuilderTests extends ESTestCase { } }; SearchModule searchModule = new SearchModule(settings, false, - Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())) { - @Override - protected void configureSearch() { 
- // Skip me - } - }; + Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())); List entries = new ArrayList<>(); entries.addAll(indicesModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables()); diff --git a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index eb37299306c..99a2b438ffd 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -59,12 +59,7 @@ public class ShardSearchTransportRequestTests extends ESTestCase { } }; SearchModule searchModule = new SearchModule(Settings.EMPTY, false, - Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())) { - @Override - protected void configureSearch() { - // Skip me - } - }; + Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())); List entries = new ArrayList<>(); entries.addAll(indicesModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables()); diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index 37da93b8257..a1be5769b61 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -19,18 +19,21 @@ package org.elasticsearch.node; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.threadpool.ThreadPool; import java.util.Collection; -import java.util.List; /** * A node for testing which allows: @@ -62,11 +65,15 @@ public class MockNode extends Node { return new MockBigArrays(settings, circuitBreakerService); } + @Override - protected Class pickSearchServiceImplementation() { + protected SearchService newSearchService(ClusterService clusterService, IndicesService indicesService, + ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, + FetchPhase fetchPhase) { if (getPluginsService().filterPlugins(MockSearchService.TestPlugin.class).isEmpty()) { - return super.pickSearchServiceImplementation(); + return super.newSearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase); } - return MockSearchService.class; + return new MockSearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase); } } + diff --git a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java index cae5b2ff95b..bf300889cd5 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java +++ 
b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java @@ -20,9 +20,6 @@ package org.elasticsearch.search; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.MockNode; @@ -69,11 +66,10 @@ public class MockSearchService extends SearchService { ACTIVE_SEARCH_CONTEXTS.remove(context); } - @Inject - public MockSearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, + public MockSearchService(ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, FetchPhase fetchPhase) { - super(settings, clusterSettings, clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase); + super(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 85045aa3f80..0560ec2a910 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -1045,12 +1045,7 @@ public abstract class AbstractQueryTestCase> scriptSettings.addAll(pluginsService.getPluginSettings()); scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED); SettingsModule settingsModule = new SettingsModule(nodeSettings, scriptSettings, pluginsService.getPluginSettingsFilter()); - searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class)) { - @Override - protected void configureSearch() { - // Skip me - } - }; + searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class)); IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class)) { @Override public void configure() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index 5495fb166b3..813686f4a3a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -117,7 +117,7 @@ public class TestSearchContext extends SearchContext { } @Override - public void preProcess() { + public void preProcess(boolean rewrite) { } @Override From 94625d74e46a1638be7b602c6e842cc8d9125cd9 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Mon, 12 Sep 2016 15:47:01 -0600 Subject: [PATCH 12/18] No longer allow cluster name in data path In 5.x we allowed this with a deprecation warning. This removes the code added for that deprecation, requiring the cluster name to not be in the data path. 
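In practice this means node data must live directly under each configured `path.data` entry rather than under a cluster-name subfolder. A minimal sketch of the required move in plain `java.nio.file`, assuming a data path of `/tmp/mydata` and a cluster named `foo` (both illustrative values, not taken from the patch):

[source,java]
----
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class DataPathMigration {
    public static void main(String[] args) throws IOException {
        Path dataDir = Paths.get("/tmp/mydata");            // assumed path.data entry
        Path src = dataDir.resolve("foo").resolve("nodes"); // old 5.x layout: data path, cluster name, nodes
        Path dest = dataDir.resolve("nodes");               // required layout: data path, nodes
        if (Files.isDirectory(src) && Files.exists(dest) == false) {
            Files.move(src, dest); // relocate the nodes folder up one level
        }
    }
}
----

This mirrors the `Files.move` of the `NODES_FOLDER` that the `ESIntegTestCase` change further down in this patch performs when preparing backwards-compatibility indices.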
Resolves #20391 --- .../org/elasticsearch/bootstrap/Security.java | 5 --- .../elasticsearch/env/NodeEnvironment.java | 26 ----------- .../env/NodeEnvironmentTests.java | 43 ------------------- docs/reference/migration/migrate_6_0.asciidoc | 4 ++ .../migration/migrate_6_0/cluster.asciidoc | 27 ++++++++++++ .../migration/migrate_6_0/docs.asciidoc | 2 +- ...PercolatorBackwardsCompatibilityTests.java | 5 +-- .../elasticsearch/test/ESIntegTestCase.java | 4 +- 8 files changed, 36 insertions(+), 80 deletions(-) create mode 100644 docs/reference/migration/migrate_6_0/cluster.asciidoc diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Security.java b/core/src/main/java/org/elasticsearch/bootstrap/Security.java index a090d70707b..e45e42757c2 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -257,11 +257,6 @@ final class Security { for (Path path : environment.dataFiles()) { addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete"); } - // TODO: this should be removed in ES 6.0! We will no longer support data paths with the cluster as a folder - // https://github.com/elastic/elasticsearch/issues/20391 - for (Path path : environment.dataWithClusterFiles()) { - addPathIfExists(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete"); - } for (Path path : environment.repoFiles()) { addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete"); } diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index df9514cdf88..f3e1f2fb24d 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -209,13 +209,6 @@ public final class NodeEnvironment implements Closeable { for (int dirIndex = 0; dirIndex < environment.dataFiles().length; dirIndex++) { Path dataDirWithClusterName = environment.dataWithClusterFiles()[dirIndex]; Path dataDir = environment.dataFiles()[dirIndex]; - // TODO: Remove this in 6.0, we are no longer going to read from the cluster name directory - if (readFromDataPathWithClusterName(dataDirWithClusterName)) { - DeprecationLogger deprecationLogger = new DeprecationLogger(startupTraceLogger); - deprecationLogger.deprecated("ES has detected the [path.data] folder using the cluster name as a folder [{}], " + - "Elasticsearch 6.0 will not allow the cluster name as a folder within the data path", dataDir); - dataDir = dataDirWithClusterName; - } Path dir = dataDir.resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId)); Files.createDirectories(dir); @@ -289,25 +282,6 @@ public final class NodeEnvironment implements Closeable { } } - // Visible for testing - /** Returns true if data should be read from the data path that includes the cluster name (ie, it has data in it) */ - static boolean readFromDataPathWithClusterName(Path dataPathWithClusterName) throws IOException { - if (Files.exists(dataPathWithClusterName) == false || // If it doesn't exist - Files.isDirectory(dataPathWithClusterName) == false || // Or isn't a directory - dirEmpty(dataPathWithClusterName)) { // Or if it's empty - // No need to read from cluster-name folder! 
- return false; - } - // The "nodes" directory inside of the cluster name - Path nodesPath = dataPathWithClusterName.resolve(NODES_FOLDER); - if (Files.isDirectory(nodesPath)) { - // The cluster has data in the "nodes" so we should read from the cluster-named folder for now - return true; - } - // Hey the nodes directory didn't exist, so we can safely use whatever directory we feel appropriate - return false; - } - private static void releaseAndNullLocks(Lock[] locks) { for (int i = 0; i < locks.length; i++) { if (locks[i] != null) { diff --git a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index e5c1c53dad7..9c11ae6b23f 100644 --- a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -396,49 +396,6 @@ public class NodeEnvironmentTests extends ESTestCase { env.close(); } - public void testWhetherClusterFolderShouldBeUsed() throws Exception { - Path tempNoCluster = createTempDir(); - Path tempDataPath = tempNoCluster.toAbsolutePath(); - - Path tempPath = tempNoCluster.resolve("foo"); // "foo" is the cluster name - Path tempClusterPath = tempPath.toAbsolutePath(); - - assertFalse("non-existent directory should not be used", NodeEnvironment.readFromDataPathWithClusterName(tempPath)); - Settings settings = Settings.builder() - .put("cluster.name", "foo") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) - .put(Environment.PATH_DATA_SETTING.getKey(), tempDataPath.toString()).build(); - try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) { - Path nodeDataPath = env.nodeDataPaths()[0]; - assertEquals(nodeDataPath, tempDataPath.resolve("nodes").resolve("0")); - } - IOUtils.rm(tempNoCluster); - - Files.createDirectories(tempPath); - assertFalse("empty directory should not be read from", NodeEnvironment.readFromDataPathWithClusterName(tempPath)); - settings = Settings.builder() - .put("cluster.name", "foo") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) - .put(Environment.PATH_DATA_SETTING.getKey(), tempDataPath.toString()).build(); - try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) { - Path nodeDataPath = env.nodeDataPaths()[0]; - assertEquals(nodeDataPath, tempDataPath.resolve("nodes").resolve("0")); - } - IOUtils.rm(tempNoCluster); - - // Create a directory for the cluster name - Files.createDirectories(tempPath.resolve(NodeEnvironment.NODES_FOLDER)); - assertTrue("there is data in the directory", NodeEnvironment.readFromDataPathWithClusterName(tempPath)); - settings = Settings.builder() - .put("cluster.name", "foo") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) - .put(Environment.PATH_DATA_SETTING.getKey(), tempClusterPath.toString()).build(); - try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) { - Path nodeDataPath = env.nodeDataPaths()[0]; - assertEquals(nodeDataPath, tempClusterPath.resolve("nodes").resolve("0")); - } - } - public void testPersistentNodeId() throws IOException { String[] paths = tmpPaths(); NodeEnvironment env = newNodeEnvironment(paths, Settings.builder() diff --git a/docs/reference/migration/migrate_6_0.asciidoc b/docs/reference/migration/migrate_6_0.asciidoc index dbf48febd18..fb0293a7e92 100644 --- a/docs/reference/migration/migrate_6_0.asciidoc +++ 
b/docs/reference/migration/migrate_6_0.asciidoc @@ -27,6 +27,8 @@ way to reindex old indices is to use the `reindex` API. * <> * <> * <> +* <> +* <> include::migrate_6_0/mapping.asciidoc[] @@ -35,3 +37,5 @@ include::migrate_6_0/rest.asciidoc[] include::migrate_6_0/search.asciidoc[] include::migrate_6_0/docs.asciidoc[] + +include::migrate_6_0/cluster.asciidoc[] diff --git a/docs/reference/migration/migrate_6_0/cluster.asciidoc b/docs/reference/migration/migrate_6_0/cluster.asciidoc new file mode 100644 index 00000000000..bd070d8d1f4 --- /dev/null +++ b/docs/reference/migration/migrate_6_0/cluster.asciidoc @@ -0,0 +1,27 @@ +[[breaking_60_cluster_changes]] +=== Cluster changes + +==== Cluster name no longer allowed in path.data + +Previously the cluster name could be used in the `path.data` setting with a +warning. This is now no longer allowed. For instance, in the previous version +this was valid: + +[source,sh] +-------------------------------------------------- +# Assuming path.data is /tmp/mydata +# No longer supported: +$ tree /tmp/mydata +/tmp/mydata +├── +│   └── nodes +│   └── 0 +│   └── + +# Should be changed to: +$ tree /tmp/mydata +/tmp/mydata +├── nodes +│   └── 0 +│   └── +-------------------------------------------------- diff --git a/docs/reference/migration/migrate_6_0/docs.asciidoc b/docs/reference/migration/migrate_6_0/docs.asciidoc index d5e0ae77657..5d19c000ad7 100644 --- a/docs/reference/migration/migrate_6_0/docs.asciidoc +++ b/docs/reference/migration/migrate_6_0/docs.asciidoc @@ -1,4 +1,4 @@ -[[breaking_60_document_api_changes]] +[[breaking_60_docs_changes]] === Document API changes ==== version type 'force' removed diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java index 48459858ba5..d103b7d3ecd 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java @@ -143,15 +143,14 @@ public class PercolatorBackwardsCompatibilityTests extends ESIntegTestCase { } private void setupNode() throws Exception { - Path dataDir = createTempDir(); - Path clusterDir = Files.createDirectory(dataDir.resolve(cluster().getClusterName())); + Path clusterDir = createTempDir(); try (InputStream stream = PercolatorBackwardsCompatibilityTests.class. 
            getResourceAsStream("/indices/percolator/bwc_index_2.0.0.zip")) {
             TestUtil.unzip(stream, clusterDir);
         }
 
         Settings.Builder nodeSettings = Settings.builder()
-            .put(Environment.PATH_DATA_SETTING.getKey(), dataDir);
+            .put(Environment.PATH_DATA_SETTING.getKey(), clusterDir);
         internalCluster().startNode(nodeSettings.build());
         ensureGreen(INDEX_NAME);
     }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index f76c9e43a22..833c27f9c55 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -2064,8 +2064,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
             }
             throw new IllegalStateException(builder.toString());
         }
-        Path src = list[0];
-        Path dest = dataDir.resolve(internalCluster().getClusterName());
+        Path src = list[0].resolve(NodeEnvironment.NODES_FOLDER);
+        Path dest = dataDir.resolve(NodeEnvironment.NODES_FOLDER);
         assertTrue(Files.exists(src));
         Files.move(src, dest);
         assertFalse(Files.exists(src));

From 4cf4683a64b26bf12365dbe46abac177fe66efbd Mon Sep 17 00:00:00 2001
From: chico chen
Date: Tue, 13 Sep 2016 06:38:24 +0800
Subject: [PATCH 13/18] Build: cannot find JAVA_HOME in eclipse gradle
 plugin (#17645)

---
 .../src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 7ccdbcee221..4c06a48de33 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -157,7 +157,7 @@ class BuildPlugin implements Plugin<Project> {
     private static String findJavaHome() {
         String javaHome = System.getenv('JAVA_HOME')
         if (javaHome == null) {
-            if (System.getProperty("idea.active") != null) {
+            if (System.getProperty("idea.active") != null || System.getProperty("eclipse.launcher") != null) {
                 // intellij doesn't set JAVA_HOME, so we use the jdk gradle was run with
                 javaHome = Jvm.current().javaHome
             } else {

From 3c10f90b47345b538de191ee6d43c3a742ad9bfc Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Mon, 12 Sep 2016 17:00:28 -0600
Subject: [PATCH 14/18] [TEST] Fix plugin list comparison

The plugin list command now prints each plugin as `name@version`, but
the packaging test compares the output against a list of plugin names
without versions. This change strips the version from the command's
output before the comparison.
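
For reference, the bash fix below simply drops everything from the first
`@` on each output line (`cut -d'@' -f1`). A hypothetical Java equivalent
of that transformation, for illustration only (the class and method names
here are not part of this patch):

    public class StripPluginVersion {
        // Mirrors `cut -d'@' -f1`: keep only the text before the first '@'.
        static String stripVersion(String line) {
            int at = line.indexOf('@');
            return at < 0 ? line : line.substring(0, at);
        }

        public static void main(String[] args) {
            // A line such as "analysis-icu@5.0.0" (version is illustrative)
            // must be reduced to "analysis-icu" before it is compared
            // against the list of plugins from the plugins pom.
            System.out.println(stripVersion("analysis-icu@5.0.0")); // analysis-icu
            System.out.println(stripVersion("no-version-here"));    // unchanged
        }
    }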
---
 .../packaging/scripts/module_and_plugin_test_cases.bash | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash
index b44e5885ff8..c55d28e971c 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash
+++ b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash
@@ -300,7 +300,7 @@ fi
 }
 
 @test "[$GROUP] check the installed plugins can be listed with 'plugins list' and result matches the list of plugins in plugins pom" {
-    "$ESHOME/bin/elasticsearch-plugin" list > /tmp/installed
+    "$ESHOME/bin/elasticsearch-plugin" list | cut -d'@' -f1 > /tmp/installed
     compare_plugins_list "/tmp/installed" "'plugins list'"
 }
 

From 494ad0d5720e7f2893ad87b58a110e1b3571d113 Mon Sep 17 00:00:00 2001
From: Ali Beyad
Date: Mon, 12 Sep 2016 19:47:20 -0400
Subject: [PATCH 15/18] [TESTS] for the REST cluster stats test, if free or
 used (#20434)

memory is much less than the total memory, the percentage returned
could be 0%. The yaml tests check that the free/used percentages are
valid values by asserting `is_true`, but it turns out that `is_true`
returns false if the value is assigned but it is 0 or even the string
"0". This commit changes the assertion in the yaml test to ensure the
value is greater than or equal to 0 instead.
---
 .../resources/rest-api-spec/test/cluster.stats/10_basic.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml
index ccbfc199cec..29f048068b4 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yaml
@@ -22,8 +22,8 @@
   - is_true: nodes.os.mem.total_in_bytes
   - is_true: nodes.os.mem.free_in_bytes
   - is_true: nodes.os.mem.used_in_bytes
-  - is_true: nodes.os.mem.free_percent
-  - is_true: nodes.os.mem.used_percent
+  - gte: { nodes.os.mem.free_percent: 0 }
+  - gte: { nodes.os.mem.used_percent: 0 }
   - is_true: nodes.process
   - is_true: nodes.jvm
   - is_true: nodes.fs

From 262a5ee3114dbcd736245b849a68d6d9ead21bbe Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Mon, 12 Sep 2016 19:48:12 -0400
Subject: [PATCH 16/18] Use HTTPS for downloading released versions

This commit changes the protocol used for downloading the Maven
metadata for released versions of Elasticsearch from HTTP to HTTPS.
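
Only the URL scheme changes; the version list itself is unaffected. As a
rough, self-contained sketch of what the build's `getVersions()` helper
does against this endpoint (written here in Java for illustration; the
real implementation is the Groovy shown in the diff below):

    import java.io.InputStream;
    import java.net.URL;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.NodeList;

    public class FetchReleasedVersions {
        public static void main(String[] args) throws Exception {
            // Same endpoint the Gradle helper queries, now over TLS.
            URL url = new URL("https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml");
            try (InputStream in = url.openStream()) {
                Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(in);
                NodeList versions = doc.getElementsByTagName("version");
                for (int i = 0; i < versions.getLength(); i++) {
                    String v = versions.item(i).getTextContent();
                    // Keep the same 2.x.y filter the build applies.
                    if (v.matches("2\\.\\d\\.\\d")) {
                        System.out.println(v);
                    }
                }
            }
        }
    }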
---
 qa/vagrant/build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle
index 32c74a35754..7dabc2cebe9 100644
--- a/qa/vagrant/build.gradle
+++ b/qa/vagrant/build.gradle
@@ -122,7 +122,7 @@ task stop {
 
 Set<String> getVersions() {
   Node xml
-  new URL('http://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
+  new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
     xml = new XmlParser().parse(s)
   }
   return new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /2\.\d\.\d/ })

From 567093cf788a81f13bd3ba851e02ac785bfe2a75 Mon Sep 17 00:00:00 2001
From: Tanguy Leroux
Date: Mon, 12 Sep 2016 17:58:32 +0200
Subject: [PATCH 17/18] Add Ubuntu-16.04 to Vagrant VMs (#20425)

---
 TESTING.asciidoc        | 1 +
 Vagrantfile             | 7 +++++++
 qa/vagrant/build.gradle | 3 ++-
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index 5046dc087b5..5cdc99c9be3 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -364,6 +364,7 @@ These are the linux flavors the Vagrantfile currently supports:
 * ubuntu-1204 aka precise
 * ubuntu-1404 aka trusty
 * ubuntu-1504 aka vivid
+* ubuntu-1604 aka xenial
 * debian-8 aka jessie, the current debian stable distribution
 * centos-6
 * centos-7
diff --git a/Vagrantfile b/Vagrantfile
index fc148ee4443..761ef20628d 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -37,6 +37,13 @@ Vagrant.configure(2) do |config|
       [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
     SHELL
   end
+  config.vm.define "ubuntu-1604" do |config|
+    config.vm.box = "elastic/ubuntu-16.04-x86_64"
+    ubuntu_common config, extra: <<-SHELL
+      # Install Jayatana so we can work around it being present.
+      [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
+    SHELL
+  end
   # Wheezy's backports don't contain Openjdk 8 and the backflips required to
   # get the sun jdk on there just aren't worth it. We have jessie for testing
   # debian and it works fine.
diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle
index 7dabc2cebe9..746f3291547 100644
--- a/qa/vagrant/build.gradle
+++ b/qa/vagrant/build.gradle
@@ -37,7 +37,8 @@ List<String> availableBoxes = [
   'sles-12',
   'ubuntu-1204',
   'ubuntu-1404',
-  'ubuntu-1504'
+  'ubuntu-1504',
+  'ubuntu-1604'
 ]
 
 String vagrantBoxes = getProperties().get('vagrant.boxes', 'sample')

From 1ae8d6123fac354a2f43926bee10cd23c9718aa8 Mon Sep 17 00:00:00 2001
From: makeyang
Date: Tue, 13 Sep 2016 17:17:39 +0800
Subject: [PATCH 18/18] Add node name to decider trace logging (#20437)

Adds the entire DiscoveryNode object to the trace log in
AllocationDeciders. The allocation decider logging at TRACE level can
sometimes be helpful to determine why a shard is not getting allocated
on specific nodes. Previously only the node id was logged in these
messages; including the node name as well is helpful, especially when
dealing with a lot of nodes in the cluster.
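
To illustrate the difference, here is a toy, self-contained sketch (the
`Node` class below is a stand-in, not the real DiscoveryNode, whose
toString() also includes details such as the transport address):

    public class NodeLogDemo {
        // Toy stand-in for DiscoveryNode: toString() carries the node name
        // as well as the id, which is the extra information the TRACE
        // message gains from this change.
        static class Node {
            final String name;
            final String id;
            Node(String name, String id) { this.name = name; this.id = id; }
            String nodeId() { return id; }
            @Override public String toString() { return "{" + name + "}{" + id + "}"; }
        }

        public static void main(String[] args) {
            Node node = new Node("node-1", "Hq7k3lZx"); // illustrative values
            System.out.println("node [" + node.nodeId() + "]"); // before: id only
            System.out.println("node [" + node + "]");          // after: name and id
        }
    }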
---
 .../cluster/routing/allocation/decider/AllocationDeciders.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java
index 5b6f145fe8f..986613e5a42 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java
@@ -74,7 +74,7 @@ public class AllocationDeciders extends AllocationDecider {
             // short track if a NO is returned.
             if (decision == Decision.NO) {
                 if (logger.isTraceEnabled()) {
-                    logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.nodeId(), allocationDecider.getClass().getSimpleName());
+                    logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.node(), allocationDecider.getClass().getSimpleName());
                 }
                 // short circuit only if debugging is not enabled
                 if (!allocation.debugDecision()) {