From 7698ab7bfc83305da7b3d7e0df3b5a52aa7e1266 Mon Sep 17 00:00:00 2001 From: Isabel Drost-Fromm Date: Tue, 17 Nov 2015 13:40:10 +0100 Subject: [PATCH 01/37] Fix typos in query dsl docs. When passing the example json snippets through the query parser while working on #14249 some of the examples could not be parsed. This PR fixes those examples. Relates to #14249 --- docs/reference/query-dsl/bool-query.asciidoc | 2 +- .../query-dsl/geo-bounding-box-query.asciidoc | 8 ++++---- docs/reference/query-dsl/has-child-query.asciidoc | 10 +++++----- docs/reference/query-dsl/mlt-query.asciidoc | 1 - 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/docs/reference/query-dsl/bool-query.asciidoc b/docs/reference/query-dsl/bool-query.asciidoc index 8f2fdb0c99e..17bf74df1e1 100644 --- a/docs/reference/query-dsl/bool-query.asciidoc +++ b/docs/reference/query-dsl/bool-query.asciidoc @@ -51,7 +51,7 @@ final `_score` for each document. }, "filter": { "term" : { "tag" : "tech" } - } + }, "must_not" : { "range" : { "age" : { "from" : 10, "to" : 20 } diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index c52bcb93e7d..f751e83fc80 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -188,10 +188,10 @@ values separately. "filter" : { "geo_bounding_box" : { "pin.location" : { - "top" : -74.1, - "left" : 40.73, - "bottom" : -71.12, - "right" : 40.01 + "top" : 40.73, + "left" : -74.1, + "bottom" : 40.01, + "right" : -71.12 } } } diff --git a/docs/reference/query-dsl/has-child-query.asciidoc b/docs/reference/query-dsl/has-child-query.asciidoc index 24951bbe930..f65434242ea 100644 --- a/docs/reference/query-dsl/has-child-query.asciidoc +++ b/docs/reference/query-dsl/has-child-query.asciidoc @@ -9,7 +9,7 @@ an example: -------------------------------------------------- { "has_child" : { - "type" : "blog_tag", + "child_type" : "blog_tag", "query" : { "term" : { "tag" : "something" @@ -34,8 +34,8 @@ inside the `has_child` query: -------------------------------------------------- { "has_child" : { - "type" : "blog_tag", - "score_mode" : "sum", + "child_type" : "blog_tag", + "score_mode" : "min", "query" : { "term" : { "tag" : "something" @@ -56,8 +56,8 @@ a match: -------------------------------------------------- { "has_child" : { - "type" : "blog_tag", - "score_mode" : "sum", + "child_type" : "blog_tag", + "score_mode" : "min", "min_children": 2, <1> "max_children": 10, <1> "query" : { diff --git a/docs/reference/query-dsl/mlt-query.asciidoc b/docs/reference/query-dsl/mlt-query.asciidoc index ee4b695c2ff..ce2d34144ee 100644 --- a/docs/reference/query-dsl/mlt-query.asciidoc +++ b/docs/reference/query-dsl/mlt-query.asciidoc @@ -73,7 +73,6 @@ present in the index, the syntax is similar to < Date: Thu, 19 Nov 2015 14:47:01 +0100 Subject: [PATCH 02/37] Revert back to type instead of child_type... for has child queries. 
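For reference, the restored request shape maps onto the era's Java API roughly as follows; a minimal sketch, assuming QueryBuilders still exposes hasChildQuery(String, QueryBuilder) at this point (the snippet is illustrative and not part of this patch):

import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

// The parent/child link is addressed via "type" again, not "child_type".
QueryBuilder query = QueryBuilders.hasChildQuery("blog_tag",
        QueryBuilders.termQuery("tag", "something"));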
Relates to #14249 --- docs/reference/query-dsl/has-child-query.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference/query-dsl/has-child-query.asciidoc b/docs/reference/query-dsl/has-child-query.asciidoc index f65434242ea..5ffdb4a2b8d 100644 --- a/docs/reference/query-dsl/has-child-query.asciidoc +++ b/docs/reference/query-dsl/has-child-query.asciidoc @@ -9,7 +9,7 @@ an example: -------------------------------------------------- { "has_child" : { - "child_type" : "blog_tag", + "type" : "blog_tag", "query" : { "term" : { "tag" : "something" @@ -34,7 +34,7 @@ inside the `has_child` query: -------------------------------------------------- { "has_child" : { - "child_type" : "blog_tag", + "type" : "blog_tag", "score_mode" : "min", "query" : { "term" : { @@ -56,7 +56,7 @@ a match: -------------------------------------------------- { "has_child" : { - "child_type" : "blog_tag", + "type" : "blog_tag", "score_mode" : "min", "min_children": 2, <1> "max_children": 10, <1> From cc743049cf41b189aca4ed66088d868f1e1f6688 Mon Sep 17 00:00:00 2001 From: Felipe Forbeck Date: Wed, 27 Jan 2016 23:39:36 -0200 Subject: [PATCH 03/37] Skipping hidden files compilation for script service --- .../elasticsearch/script/ScriptService.java | 5 +++ .../script/ScriptServiceTests.java | 33 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 8e1ac1c8d77..2fbfc126380 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -42,6 +42,7 @@ import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -533,6 +534,10 @@ public class ScriptService extends AbstractComponent implements Closeable { if (logger.isTraceEnabled()) { logger.trace("Loading script file : [{}]", file); } + if (FileSystemUtils.isHidden(file)) { + logger.warn("--- Hidden file skipped : [{}]", file); + return; + } Tuple scriptNameExt = scriptNameExt(file); if (scriptNameExt != null) { ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 0825da4d4df..f703b9d9364 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -151,6 +151,39 @@ public class ScriptServiceTests extends ESTestCase { } } + public void testHiddenFileSkipped() throws IOException { + ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); + buildScriptService(Settings.EMPTY); + + logger.info("--> setup one hidden test file"); + Path testFileHidden = scriptsFilePath.resolve(".hidden_file"); + Path testRegularFile = scriptsFilePath.resolve("test_file.tst"); + Streams.copy("test_hidden_file".getBytes("UTF-8"), Files.newOutputStream(testFileHidden)); + Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(testRegularFile)); + 
resourceWatcherService.notifyNow(); + + try { + logger.info("--> verify if hidden_file was skipped"); + scriptService.compile(new Script("hidden_file", ScriptType.FILE, "test", null), + ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap()); + fail("the script hidden_file should not be processed"); + } catch (IllegalArgumentException ex) { + assertThat(ex.getMessage(), containsString("Unable to find on disk file script [hidden_file] using lang [test]")); + } + + logger.info("--> verify if test_file was correctly processed"); + CompiledScript compiledScript = scriptService.compile(new Script("test_file", ScriptType.FILE, "test", null), + ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap()); + assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file")); + + logger.info("--> delete hidden file"); + Files.delete(testFileHidden); + + logger.info("--> delete test file"); + Files.delete(testRegularFile); + resourceWatcherService.notifyNow(); + } + public void testInlineScriptCompiledOnceCache() throws IOException { buildScriptService(Settings.EMPTY); CompiledScript compiledScript1 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), From 5478c97a362a9f450b75af6ca6c8b2b0a4cf3f90 Mon Sep 17 00:00:00 2001 From: Felipe Forbeck Date: Tue, 2 Feb 2016 08:29:53 -0200 Subject: [PATCH 04/37] Minor fixes after review --- .../elasticsearch/script/ScriptService.java | 10 ++++---- .../script/ScriptServiceTests.java | 25 +++++++------------ 2 files changed, 14 insertions(+), 21 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 2fbfc126380..ae03bde5032 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -56,7 +56,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.Environment; import org.elasticsearch.index.query.TemplateQueryParser; -import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.watcher.FileChangesListener; import org.elasticsearch.watcher.FileWatcher; @@ -531,13 +530,14 @@ public class ScriptService extends AbstractComponent implements Closeable { @Override public void onFileInit(Path file) { + if (FileSystemUtils.isHidden(file)) { + logger.debug("Hidden script file skipped : [{}]", file); + return; + } if (logger.isTraceEnabled()) { logger.trace("Loading script file : [{}]", file); } - if (FileSystemUtils.isHidden(file)) { - logger.warn("--- Hidden file skipped : [{}]", file); - return; - } + Tuple scriptNameExt = scriptNameExt(file); if (scriptNameExt != null) { ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index f703b9d9364..e5965ed3297 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -151,36 +151,29 @@ public class ScriptServiceTests extends ESTestCase { } } - public void testHiddenFileSkipped() throws IOException { - ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); + public void testHiddenScriptFileSkipped() throws 
IOException { buildScriptService(Settings.EMPTY); - logger.info("--> setup one hidden test file"); - Path testFileHidden = scriptsFilePath.resolve(".hidden_file"); - Path testRegularFile = scriptsFilePath.resolve("test_file.tst"); - Streams.copy("test_hidden_file".getBytes("UTF-8"), Files.newOutputStream(testFileHidden)); - Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(testRegularFile)); + Path testHiddenFile = scriptsFilePath.resolve(".hidden_file"); + Path testFile = scriptsFilePath.resolve("test_file.tst"); + Streams.copy("test_hidden_file".getBytes("UTF-8"), Files.newOutputStream(testHiddenFile)); + Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(testFile)); resourceWatcherService.notifyNow(); try { - logger.info("--> verify if hidden_file was skipped"); scriptService.compile(new Script("hidden_file", ScriptType.FILE, "test", null), - ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap()); + ScriptContext.Standard.SEARCH, Collections.emptyMap()); fail("the script hidden_file should not be processed"); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage(), containsString("Unable to find on disk file script [hidden_file] using lang [test]")); } - logger.info("--> verify if test_file was correctly processed"); CompiledScript compiledScript = scriptService.compile(new Script("test_file", ScriptType.FILE, "test", null), - ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap()); + ScriptContext.Standard.SEARCH, Collections.emptyMap()); assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file")); - logger.info("--> delete hidden file"); - Files.delete(testFileHidden); - - logger.info("--> delete test file"); - Files.delete(testRegularFile); + Files.delete(testHiddenFile); + Files.delete(testFile); resourceWatcherService.notifyNow(); } From f65f84e0ef8cd557b41f37f39a6982d7e9a4c6dc Mon Sep 17 00:00:00 2001 From: Felipe Forbeck Date: Tue, 2 Feb 2016 09:46:30 -0200 Subject: [PATCH 05/37] Ignoring hidden script files and files with invalid names --- .../elasticsearch/script/ScriptService.java | 77 ++++++++++--------- .../script/ScriptServiceTests.java | 51 ++++++++---- 2 files changed, 75 insertions(+), 53 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index ae03bde5032..4d1cfed9544 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -42,7 +42,6 @@ import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -225,6 +224,8 @@ public class ScriptService extends AbstractComponent implements Closeable { return scriptEngineService; } + + /** * Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script. 
*/ @@ -355,6 +356,22 @@ public class ScriptService extends AbstractComponent implements Closeable { + scriptLang + "/" + id + "]"); } + Tuple getScriptNameExt(Path file) { + Path scriptPath = scriptsDirectory.relativize(file); + int extIndex = scriptPath.toString().lastIndexOf('.'); + if (extIndex <= 0) { + return null; + } + + String ext = scriptPath.toString().substring(extIndex + 1); + if (ext.isEmpty()) { + return null; + } + + String scriptName = scriptPath.toString().substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_"); + return new Tuple<>(scriptName, ext); + } + private void validate(BytesReference scriptBytes, String scriptLang) { try { XContentParser parser = XContentFactory.xContent(scriptBytes).createParser(scriptBytes); @@ -516,51 +533,37 @@ public class ScriptService extends AbstractComponent implements Closeable { private class ScriptChangesListener extends FileChangesListener { - private Tuple scriptNameExt(Path file) { - Path scriptPath = scriptsDirectory.relativize(file); - int extIndex = scriptPath.toString().lastIndexOf('.'); - if (extIndex != -1) { - String ext = scriptPath.toString().substring(extIndex + 1); - String scriptName = scriptPath.toString().substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_"); - return new Tuple<>(scriptName, ext); - } else { - return null; - } - } - @Override public void onFileInit(Path file) { - if (FileSystemUtils.isHidden(file)) { - logger.debug("Hidden script file skipped : [{}]", file); + Tuple scriptNameExt = getScriptNameExt(file); + if (scriptNameExt == null) { + logger.debug("Skipped script with invalid extension : [{}]", file); return; } if (logger.isTraceEnabled()) { logger.trace("Loading script file : [{}]", file); } - Tuple scriptNameExt = scriptNameExt(file); - if (scriptNameExt != null) { - ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); - if (engineService == null) { - logger.warn("no script engine found for [{}]", scriptNameExt.v2()); - } else { - try { - //we don't know yet what the script will be used for, but if all of the operations for this lang - // with file scripts are disabled, it makes no sense to even compile it and cache it. - if (isAnyScriptContextEnabled(engineService.getTypes().get(0), engineService, ScriptType.FILE)) { - logger.info("compiling script file [{}]", file.toAbsolutePath()); - try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) { - String script = Streams.copyToString(reader); - CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap()); - staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.getTypes().get(0), engineService.compile(script, Collections.emptyMap()))); - scriptMetrics.onCompilation(); - } - } else { - logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath()); + ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); + if (engineService == null) { + logger.warn("No script engine found for [{}]", scriptNameExt.v2()); + } else { + try { + //we don't know yet what the script will be used for, but if all of the operations for this lang + // with file scripts are disabled, it makes no sense to even compile it and cache it. 
+ if (isAnyScriptContextEnabled(engineService.getTypes().get(0), engineService, ScriptType.FILE)) { + logger.info("compiling script file [{}]", file.toAbsolutePath()); + try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) { + String script = Streams.copyToString(reader); + CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap()); + staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.getTypes().get(0), engineService.compile(script, Collections.emptyMap()))); + scriptMetrics.onCompilation(); } - } catch (Throwable e) { - logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1()); + } else { + logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath()); } + } catch (Throwable e) { + logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1()); } } } @@ -572,7 +575,7 @@ public class ScriptService extends AbstractComponent implements Closeable { @Override public void onFileDeleted(Path file) { - Tuple scriptNameExt = scriptNameExt(file); + Tuple scriptNameExt = getScriptNameExt(file); if (scriptNameExt != null) { ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); assert engineService != null; diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index e5965ed3297..60363b57b3b 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.script; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -122,26 +123,21 @@ public class ScriptServiceTests extends ESTestCase { } public void testScriptsWithoutExtensions() throws IOException { - buildScriptService(Settings.EMPTY); - logger.info("--> setup two test files one with extension and another without"); Path testFileNoExt = scriptsFilePath.resolve("test_no_ext"); Path testFileWithExt = scriptsFilePath.resolve("test_script.tst"); Streams.copy("test_file_no_ext".getBytes("UTF-8"), Files.newOutputStream(testFileNoExt)); Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(testFileWithExt)); resourceWatcherService.notifyNow(); - logger.info("--> verify that file with extension was correctly processed"); CompiledScript compiledScript = scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH, Collections.emptyMap()); assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file")); - logger.info("--> delete both files"); Files.delete(testFileNoExt); Files.delete(testFileWithExt); resourceWatcherService.notifyNow(); - logger.info("--> verify that file with extension was correctly removed"); try { scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH, Collections.emptyMap()); @@ -151,29 +147,52 @@ public class ScriptServiceTests extends ESTestCase { } } - public void testHiddenScriptFileSkipped() throws IOException { + public void testInvalidScriptNames() throws IOException { + buildScriptService(Settings.EMPTY); + + Path testHiddenFile = 
scriptsFilePath.resolve(".hidden_file"); + assertThat(scriptService.getScriptNameExt(testHiddenFile), org.hamcrest.Matchers.nullValue()); + + Path testWithoutName = scriptsFilePath.resolve(""); + assertThat(scriptService.getScriptNameExt(testWithoutName), org.hamcrest.Matchers.nullValue()); + + Path testDotName = scriptsFilePath.resolve("."); + assertThat(scriptService.getScriptNameExt(testDotName), org.hamcrest.Matchers.nullValue()); + + Path testWithoutExtension = scriptsFilePath.resolve("test."); + assertThat(scriptService.getScriptNameExt(testWithoutExtension), org.hamcrest.Matchers.nullValue()); + + Path testNameOnly = scriptsFilePath.resolve("test"); + assertThat(scriptService.getScriptNameExt(testNameOnly), org.hamcrest.Matchers.nullValue()); + } + + public void testValidScriptName() throws IOException { + buildScriptService(Settings.EMPTY); + + Path testTestFile = scriptsFilePath.resolve("test.ext"); + Tuple scriptNameExt = scriptService.getScriptNameExt(testTestFile); + assertThat(scriptNameExt.v1(), org.hamcrest.Matchers.equalTo("test")); + assertThat(scriptNameExt.v2(), org.hamcrest.Matchers.equalTo("ext")); + } + + public void testScriptChangesListenerOnceHiddenFileDetected() throws IOException { buildScriptService(Settings.EMPTY); Path testHiddenFile = scriptsFilePath.resolve(".hidden_file"); - Path testFile = scriptsFilePath.resolve("test_file.tst"); Streams.copy("test_hidden_file".getBytes("UTF-8"), Files.newOutputStream(testHiddenFile)); - Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(testFile)); resourceWatcherService.notifyNow(); try { - scriptService.compile(new Script("hidden_file", ScriptType.FILE, "test", null), + String invalidScriptName = ""; + scriptService.compile(new Script(invalidScriptName, ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH, Collections.emptyMap()); - fail("the script hidden_file should not be processed"); + fail("the script .hidden_file should not be processed"); } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), containsString("Unable to find on disk file script [hidden_file] using lang [test]")); + //script without name because it is a hidden file + assertThat(ex.getMessage(), containsString("Unable to find on disk file script [] using lang [test]")); } - CompiledScript compiledScript = scriptService.compile(new Script("test_file", ScriptType.FILE, "test", null), - ScriptContext.Standard.SEARCH, Collections.emptyMap()); - assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file")); - Files.delete(testHiddenFile); - Files.delete(testFile); resourceWatcherService.notifyNow(); } From 95a76a5921ff180999db68e2b4e775dc8fa92d44 Mon Sep 17 00:00:00 2001 From: Felipe Forbeck Date: Tue, 1 Mar 2016 08:37:40 -0300 Subject: [PATCH 06/37] testing script compiled once dot files detected --- .../elasticsearch/script/ScriptService.java | 34 +++++++------- .../script/ScriptServiceTests.java | 47 ++++--------------- 2 files changed, 25 insertions(+), 56 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 4d1cfed9544..cfc402dbb04 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -356,22 +356,6 @@ public class ScriptService extends AbstractComponent implements Closeable { + scriptLang + "/" + id + "]"); } - Tuple getScriptNameExt(Path file) { - Path scriptPath = 
scriptsDirectory.relativize(file); - int extIndex = scriptPath.toString().lastIndexOf('.'); - if (extIndex <= 0) { - return null; - } - - String ext = scriptPath.toString().substring(extIndex + 1); - if (ext.isEmpty()) { - return null; - } - - String scriptName = scriptPath.toString().substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_"); - return new Tuple<>(scriptName, ext); - } - private void validate(BytesReference scriptBytes, String scriptLang) { try { XContentParser parser = XContentFactory.xContent(scriptBytes).createParser(scriptBytes); @@ -533,6 +517,22 @@ public class ScriptService extends AbstractComponent implements Closeable { private class ScriptChangesListener extends FileChangesListener { + private Tuple getScriptNameExt(Path file) { + Path scriptPath = scriptsDirectory.relativize(file); + int extIndex = scriptPath.toString().lastIndexOf('.'); + if (extIndex <= 0) { + return null; + } + + String ext = scriptPath.toString().substring(extIndex + 1); + if (ext.isEmpty()) { + return null; + } + + String scriptName = scriptPath.toString().substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_"); + return new Tuple<>(scriptName, ext); + } + @Override public void onFileInit(Path file) { Tuple scriptNameExt = getScriptNameExt(file); @@ -553,7 +553,7 @@ public class ScriptService extends AbstractComponent implements Closeable { // with file scripts are disabled, it makes no sense to even compile it and cache it. if (isAnyScriptContextEnabled(engineService.getTypes().get(0), engineService, ScriptType.FILE)) { logger.info("compiling script file [{}]", file.toAbsolutePath()); - try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) { + try (InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) { String script = Streams.copyToString(reader); CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap()); staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.getTypes().get(0), engineService.compile(script, Collections.emptyMap()))); diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 60363b57b3b..a369b44e2b1 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.script; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -147,52 +146,22 @@ public class ScriptServiceTests extends ESTestCase { } } - public void testInvalidScriptNames() throws IOException { - buildScriptService(Settings.EMPTY); - - Path testHiddenFile = scriptsFilePath.resolve(".hidden_file"); - assertThat(scriptService.getScriptNameExt(testHiddenFile), org.hamcrest.Matchers.nullValue()); - - Path testWithoutName = scriptsFilePath.resolve(""); - assertThat(scriptService.getScriptNameExt(testWithoutName), org.hamcrest.Matchers.nullValue()); - - Path testDotName = scriptsFilePath.resolve("."); - assertThat(scriptService.getScriptNameExt(testDotName), org.hamcrest.Matchers.nullValue()); - - Path testWithoutExtension = scriptsFilePath.resolve("test."); - 
assertThat(scriptService.getScriptNameExt(testWithoutExtension), org.hamcrest.Matchers.nullValue()); - - Path testNameOnly = scriptsFilePath.resolve("test"); - assertThat(scriptService.getScriptNameExt(testNameOnly), org.hamcrest.Matchers.nullValue()); - } - - public void testValidScriptName() throws IOException { - buildScriptService(Settings.EMPTY); - - Path testTestFile = scriptsFilePath.resolve("test.ext"); - Tuple scriptNameExt = scriptService.getScriptNameExt(testTestFile); - assertThat(scriptNameExt.v1(), org.hamcrest.Matchers.equalTo("test")); - assertThat(scriptNameExt.v2(), org.hamcrest.Matchers.equalTo("ext")); - } - - public void testScriptChangesListenerOnceHiddenFileDetected() throws IOException { + public void testScriptCompiledOnceHiddenFileDetected() throws IOException { buildScriptService(Settings.EMPTY); Path testHiddenFile = scriptsFilePath.resolve(".hidden_file"); Streams.copy("test_hidden_file".getBytes("UTF-8"), Files.newOutputStream(testHiddenFile)); + + Path testFileScript = scriptsFilePath.resolve("file_script.tst"); + Streams.copy("test_file_script".getBytes("UTF-8"), Files.newOutputStream(testFileScript)); resourceWatcherService.notifyNow(); - try { - String invalidScriptName = ""; - scriptService.compile(new Script(invalidScriptName, ScriptType.FILE, "test", null), - ScriptContext.Standard.SEARCH, Collections.emptyMap()); - fail("the script .hidden_file should not be processed"); - } catch (IllegalArgumentException ex) { - //script without name because it is a hidden file - assertThat(ex.getMessage(), containsString("Unable to find on disk file script [] using lang [test]")); - } + CompiledScript compiledScript = scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), + ScriptContext.Standard.SEARCH, Collections.emptyMap()); + assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file_script")); Files.delete(testHiddenFile); + Files.delete(testFileScript); resourceWatcherService.notifyNow(); } From d9ddd3fa458819031563237965c084ea756d391f Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 7 Mar 2016 12:23:30 +0100 Subject: [PATCH 07/37] Remove leniency from segments info integrity checks Closes #16973 --- .../main/java/org/elasticsearch/common/lucene/Lucene.java | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 39f34ad867e..54e5738e78c 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -235,11 +235,7 @@ public class Lucene { @Override protected Object doBody(String segmentFileName) throws IOException { try (IndexInput input = directory.openInput(segmentFileName, IOContext.READ)) { - final int format = input.readInt(); - if (format == CodecUtil.CODEC_MAGIC) { - CodecUtil.checksumEntireFile(input); - } - // legacy.... + CodecUtil.checksumEntireFile(input); } return null; } From a4b5fbedb886f85837a48c4a895f9c7d9a8c4094 Mon Sep 17 00:00:00 2001 From: Isabel Drost-Fromm Date: Tue, 8 Mar 2016 10:28:26 +0100 Subject: [PATCH 08/37] Moves SortParser:parse(...) to only require QueryShardContext This removes the need for accessing the SearchContext when parsing Sort elements to queries. After applying the patch only a QueryShardContext is needed. 
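To illustrate the narrowed contract, a minimal sketch of a parser written against the new interface; SortParser and its two methods are from this patch, while the ScoreOnlySortParser class itself is hypothetical:

import org.apache.lucene.search.SortField;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryShardContext;

public class ScoreOnlySortParser implements SortParser {

    @Override
    public String[] names() {
        return new String[] { "_score_only" };
    }

    @Override
    public SortField parse(XContentParser parser, QueryShardContext context) throws Exception {
        // Everything a sort parser needs (field mappers, field data, bitset
        // filters, the script service) is reachable through QueryShardContext,
        // so no SearchContext has to be threaded through anymore.
        return SortField.FIELD_SCORE;
    }
}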
Relates to #15178 --- .../index/fielddata/IndexFieldData.java | 15 +++++++++---- .../support/NestedInnerQueryParseSupport.java | 5 ++--- .../search/sort/GeoDistanceSortParser.java | 21 ++++++++++--------- .../search/sort/ScriptSortParser.java | 21 +++++++++++-------- .../search/sort/SortParseElement.java | 15 +++++++------ .../elasticsearch/search/sort/SortParser.java | 4 ++-- .../fielddata/AbstractFieldDataTestCase.java | 2 +- .../search/sort/SortParserTests.java | 4 ++-- 8 files changed, 48 insertions(+), 39 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java index ffa23bf56e4..172e16d8f35 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java @@ -20,10 +20,14 @@ package org.elasticsearch.index.fielddata; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparatorSource; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SortField; import org.apache.lucene.search.Weight; @@ -122,11 +126,11 @@ public interface IndexFieldData extends IndexCompone public static class Nested { private final BitSetProducer rootFilter; - private final Weight innerFilter; + private final Query innerQuery; - public Nested(BitSetProducer rootFilter, Weight innerFilter) { + public Nested(BitSetProducer rootFilter, Query innerQuery) { this.rootFilter = rootFilter; - this.innerFilter = innerFilter; + this.innerQuery = innerQuery; } /** @@ -140,7 +144,10 @@ public interface IndexFieldData extends IndexCompone * Get a {@link DocIdSet} that matches the inner documents. */ public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException { - Scorer s = innerFilter.scorer(ctx); + final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx); + IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx); + Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false); + Scorer s = weight.scorer(ctx); return s == null ? 
null : s.iterator(); } } diff --git a/core/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java b/core/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java index 9923728e3bd..86983026b19 100644 --- a/core/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java +++ b/core/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -61,8 +60,8 @@ public class NestedInnerQueryParseSupport { protected ObjectMapper nestedObjectMapper; private ObjectMapper parentObjectMapper; - public NestedInnerQueryParseSupport(XContentParser parser, SearchContext searchContext) { - shardContext = searchContext.getQueryShardContext(); + public NestedInnerQueryParseSupport(XContentParser parser, QueryShardContext context) { + shardContext = context; parseContext = shardContext.parseContext(); shardContext.reset(parser); diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java index 27c8b8e0ed5..b9407b31bf6 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java @@ -43,9 +43,9 @@ import org.elasticsearch.index.fielddata.MultiGeoPointValues; import org.elasticsearch.index.fielddata.NumericDoubleValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; import org.elasticsearch.search.MultiValueMode; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; @@ -62,7 +62,7 @@ public class GeoDistanceSortParser implements SortParser { } @Override - public SortField parse(XContentParser parser, SearchContext context) throws Exception { + public SortField parse(XContentParser parser, QueryShardContext context) throws Exception { String fieldName = null; List geoPoints = new ArrayList<>(); DistanceUnit unit = DistanceUnit.DEFAULT; @@ -71,7 +71,7 @@ public class GeoDistanceSortParser implements SortParser { MultiValueMode sortMode = null; NestedInnerQueryParseSupport nestedHelper = null; - final boolean indexCreatedBeforeV2_0 = context.indexShard().indexSettings().getIndexVersionCreated().before(Version.V_2_0_0); + final boolean indexCreatedBeforeV2_0 = context.indexVersionCreated().before(Version.V_2_0_0); boolean coerce = GeoDistanceSortBuilder.DEFAULT_COERCE; boolean ignoreMalformed = GeoDistanceSortBuilder.DEFAULT_IGNORE_MALFORMED; @@ -155,12 +155,12 @@ public class GeoDistanceSortParser implements SortParser { throw new IllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance"); } - MappedFieldType fieldType = context.smartNameFieldType(fieldName); + MappedFieldType fieldType = context.fieldMapper(fieldName); if (fieldType == null) { throw new IllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort"); } final MultiValueMode 
finalSortMode = sortMode; // final reference for use in the anonymous class - final IndexGeoPointFieldData geoIndexFieldData = context.fieldData().getForField(fieldType); + final IndexGeoPointFieldData geoIndexFieldData = context.getForField(fieldType); final FixedSourceDistance[] distances = new FixedSourceDistance[geoPoints.size()]; for (int i = 0; i< geoPoints.size(); i++) { distances[i] = geoDistance.fixedSourceDistance(geoPoints.get(i).lat(), geoPoints.get(i).lon(), unit); @@ -168,15 +168,16 @@ public class GeoDistanceSortParser implements SortParser { final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { - BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()); - Query innerDocumentsFilter; + BitSetProducer rootDocumentsFilter = context.bitsetFilter(Queries.newNonNestedFilter()); + Query innerDocumentsQuery; if (nestedHelper.filterFound()) { // TODO: use queries instead - innerDocumentsFilter = nestedHelper.getInnerFilter(); + innerDocumentsQuery = nestedHelper.getInnerFilter(); } else { - innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); + innerDocumentsQuery = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } - nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false)); + + nested = new Nested(rootDocumentsFilter, innerDocumentsQuery); } else { nested = null; } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java index e4fe2c08f75..c30ea503d80 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.FieldData; @@ -37,6 +38,7 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.Script; @@ -68,7 +70,7 @@ public class ScriptSortParser implements SortParser { } @Override - public SortField parse(XContentParser parser, SearchContext context) throws Exception { + public SortField parse(XContentParser parser, QueryShardContext context) throws Exception { ScriptParameterParser scriptParameterParser = new ScriptParameterParser(); Script script = null; String type = null; @@ -122,19 +124,20 @@ public class ScriptSortParser implements SortParser { script = new Script(scriptValue.script(), scriptValue.scriptType(), scriptParameterParser.lang(), params); } } else if (params != null) { - throw new SearchParseException(context, "script params must be specified inside script object", parser.getTokenLocation()); + throw new 
ParsingException(parser.getTokenLocation(), "script params must be specified inside script object"); } if (script == null) { - throw new SearchParseException(context, "_script sorting requires setting the script to sort by", parser.getTokenLocation()); + throw new ParsingException(parser.getTokenLocation(), "_script sorting requires setting the script to sort by"); } if (type == null) { - throw new SearchParseException(context, "_script sorting requires setting the type of the script", parser.getTokenLocation()); + throw new ParsingException(parser.getTokenLocation(), "_script sorting requires setting the type of the script"); } - final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); + final SearchScript searchScript = context.getScriptService().search( + context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) { - throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]", parser.getTokenLocation()); + throw new ParsingException(parser.getTokenLocation(), "type [string] doesn't support mode [" + sortMode + "]"); } if (sortMode == null) { @@ -144,7 +147,7 @@ public class ScriptSortParser implements SortParser { // If nested_path is specified, then wrap the `fieldComparatorSource` in a `NestedFieldComparatorSource` final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { - BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()); + BitSetProducer rootDocumentsFilter = context.bitsetFilter(Queries.newNonNestedFilter()); Query innerDocumentsFilter; if (nestedHelper.filterFound()) { // TODO: use queries instead @@ -152,7 +155,7 @@ public class ScriptSortParser implements SortParser { } else { innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } - nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false)); + nested = new Nested(rootDocumentsFilter, innerDocumentsFilter); } else { nested = null; } @@ -205,7 +208,7 @@ public class ScriptSortParser implements SortParser { }; break; default: - throw new SearchParseException(context, "custom script sort type [" + type + "] not supported", parser.getTokenLocation()); + throw new ParsingException(parser.getTokenLocation(), "custom script sort type [" + type + "] not supported"); } return new SortField("_script", fieldComparatorSource, reverse); diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java b/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java index a99158787d3..83538bd9672 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java @@ -140,7 +140,7 @@ public class SortParseElement implements SearchParseElement { addSortField(context, sortFields, fieldName, reverse, unmappedType, missing, sortMode, nestedFilterParseHelper); } else { if (PARSERS.containsKey(fieldName)) { - sortFields.add(PARSERS.get(fieldName).parse(parser, context)); + sortFields.add(PARSERS.get(fieldName).parse(parser, context.getQueryShardContext())); } else { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -168,7 +168,7 @@ public class 
SortParseElement implements SearchParseElement { sortMode = MultiValueMode.fromString(parser.text()); } else if ("nested_path".equals(innerJsonName) || "nestedPath".equals(innerJsonName)) { if (nestedFilterParseHelper == null) { - nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context); + nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context.getQueryShardContext()); } nestedFilterParseHelper.setPath(parser.text()); } else { @@ -177,7 +177,7 @@ public class SortParseElement implements SearchParseElement { } else if (token == XContentParser.Token.START_OBJECT) { if ("nested_filter".equals(innerJsonName) || "nestedFilter".equals(innerJsonName)) { if (nestedFilterParseHelper == null) { - nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context); + nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context.getQueryShardContext()); } nestedFilterParseHelper.filter(); } else { @@ -239,14 +239,13 @@ public class SortParseElement implements SearchParseElement { final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()); - Query innerDocumentsFilter; + Query innerDocumentsQuery; if (nestedHelper.filterFound()) { - // TODO: use queries instead - innerDocumentsFilter = nestedHelper.getInnerFilter(); + innerDocumentsQuery = nestedHelper.getInnerFilter(); } else { - innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); + innerDocumentsQuery = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } - nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false)); + nested = new Nested(rootDocumentsFilter, innerDocumentsQuery); } else { nested = null; } diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortParser.java b/core/src/main/java/org/elasticsearch/search/sort/SortParser.java index 6383afd8845..727e576a85e 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortParser.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortParser.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.sort; import org.apache.lucene.search.SortField; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.index.query.QueryShardContext; /** * @@ -30,5 +30,5 @@ public interface SortParser { String[] names(); - SortField parse(XContentParser parser, SearchContext context) throws Exception; + SortField parse(XContentParser parser, QueryShardContext context) throws Exception; } diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index 6f8b5a45df0..66487c54bf2 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -168,7 +168,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase { protected Nested createNested(IndexSearcher searcher, Query parentFilter, Query childFilter) throws IOException { BitsetFilterCache s = indexService.cache().bitsetFilterCache(); - return new Nested(s.getBitSetProducer(parentFilter), searcher.createNormalizedWeight(childFilter, false)); + return new Nested(s.getBitSetProducer(parentFilter), childFilter); } 
public void testEmpty() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java b/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java index cbd7b5468b2..0c64b7e7b15 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java @@ -50,7 +50,7 @@ public class SortParserTests extends ESSingleNodeTestCase { XContentParser parser = XContentHelper.createParser(sortBuilder.bytes()); parser.nextToken(); GeoDistanceSortParser geoParser = new GeoDistanceSortParser(); - geoParser.parse(parser, context); + geoParser.parse(parser, context.getQueryShardContext()); sortBuilder = jsonBuilder(); sortBuilder.startObject(); @@ -139,6 +139,6 @@ public class SortParserTests extends ESSingleNodeTestCase { XContentParser parser = XContentHelper.createParser(sortBuilder.bytes()); parser.nextToken(); GeoDistanceSortParser geoParser = new GeoDistanceSortParser(); - geoParser.parse(parser, context); + geoParser.parse(parser, context.getQueryShardContext()); } } From 997fccde09fa405bd62766281393b7aac8962d7a Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Mar 2016 10:06:09 +0100 Subject: [PATCH 09/37] Remove unused delete logger in IndexingSlowLog The delete logger is a leftover and has no usage in this class. --- .../java/org/elasticsearch/index/IndexingSlowLog.java | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 75d3d60daad..a74838b3b61 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -52,7 +52,6 @@ public final class IndexingSlowLog implements IndexingOperationListener { private SlowLogLevel level; private final ESLogger indexLogger; - private final ESLogger deleteLogger; private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog"; public static final Setting INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); @@ -76,16 +75,14 @@ public final class IndexingSlowLog implements IndexingOperationListener { }, true, Setting.Scope.INDEX); IndexingSlowLog(IndexSettings indexSettings) { - this(indexSettings, Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"), - Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".delete")); + this(indexSettings, Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings())); } /** * Build with the specified loggers. Only used to testing. 
*/ - IndexingSlowLog(IndexSettings indexSettings, ESLogger indexLogger, ESLogger deleteLogger) { + IndexingSlowLog(IndexSettings indexSettings, ESLogger indexLogger) { this.indexLogger = indexLogger; - this.deleteLogger = deleteLogger; this.index = indexSettings.getIndex(); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat); @@ -111,7 +108,6 @@ public final class IndexingSlowLog implements IndexingOperationListener { private void setLevel(SlowLogLevel level) { this.level = level; this.indexLogger.setLevel(level.name()); - this.deleteLogger.setLevel(level.name()); } private void setWarnThreshold(TimeValue warnThreshold) { From 716e7267f3d1a1df7915956ac4e2f38e2ed4b3b2 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Mar 2016 10:09:23 +0100 Subject: [PATCH 10/37] Remove unused test-only constructor from IndexingSlowLog --- .../java/org/elasticsearch/index/IndexingSlowLog.java | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index a74838b3b61..d6fa552b203 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -75,14 +75,7 @@ public final class IndexingSlowLog implements IndexingOperationListener { }, true, Setting.Scope.INDEX); IndexingSlowLog(IndexSettings indexSettings) { - this(indexSettings, Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings())); - } - - /** - * Build with the specified loggers. Only used to testing. - */ - IndexingSlowLog(IndexSettings indexSettings, ESLogger indexLogger) { - this.indexLogger = indexLogger; + this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); this.index = indexSettings.getIndex(); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat); From 7b5b0d451159198706b98f0da4cd770c7a31be2a Mon Sep 17 00:00:00 2001 From: Isabel Drost-Fromm Date: Tue, 26 Jan 2016 12:35:51 +0100 Subject: [PATCH 11/37] Move missing() from SortBuilder interface to class As mentioned by @cbuescher on #16151 this method is really implemented only in the FieldSortBuilder. Moving the method down. Relates to #15178 --- .../elasticsearch/search/sort/FieldSortBuilder.java | 1 - .../search/sort/GeoDistanceSortBuilder.java | 10 ---------- .../elasticsearch/search/sort/ScoreSortBuilder.java | 5 ----- .../elasticsearch/search/sort/ScriptSortBuilder.java | 8 -------- .../org/elasticsearch/search/sort/SortBuilder.java | 6 ------ 5 files changed, 30 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index 4f082b057da..67ceb75a29c 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -68,7 +68,6 @@ public class FieldSortBuilder extends SortBuilder { * Sets the value when a field is missing in a doc. Can also be set to _last or * _first to sort missing last or first respectively. 
*/ - @Override public FieldSortBuilder missing(Object missing) { this.missing = missing; return this; } diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index e37eed61c6d..708152af1f0 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -218,16 +218,6 @@ public class GeoDistanceSortBuilder extends SortBuilder return this.order; } - /** - * Not relevant. - * - * TODO should this throw an exception rather than silently ignore a parameter that is not used? - */ - @Override - public GeoDistanceSortBuilder missing(Object missing) { - return this; - } - /** * Defines which distance to use for sorting in the case a document contains multiple geo points. * Possible values: min and max diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java index 7435ff95f45..e70a34a7c3b 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java @@ -41,11 +41,6 @@ public class ScoreSortBuilder extends SortBuilder { return this; } - @Override - public SortBuilder missing(Object missing) { - return this; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("_score"); diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index e9a9c8df57c..d02e4dc520a 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -62,14 +62,6 @@ public class ScriptSortBuilder extends SortBuilder { return this; } - /** - * Not really relevant. - */ - @Override - public SortBuilder missing(Object missing) { - return this; - } - /** * Defines which distance to use for sorting in the case a document contains multiple geo points. * Possible values: min and max diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java index da80506dde2..0935b76ece9 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java @@ -45,10 +45,4 @@ public abstract class SortBuilder implements ToXContent { * The order of sorting. Defaults to {@link SortOrder#ASC}. */ public abstract SortBuilder order(SortOrder order); - - /** - * Sets the value when a field is missing in a doc. Can also be set to _last or - * _first to sort missing last or first respectively. - */ - public abstract SortBuilder missing(Object missing); } From b9b5c15fe137a05646881f910eae6f701cd1c4d7 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 9 Mar 2016 11:39:20 +0100 Subject: [PATCH 12/37] test: ensure that each node sees 2 nodes.
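The ensureStableCluster(2, node) calls added below amount to a cluster health request issued through the given node; roughly, a sketch built on the ESIntegTestCase helpers, spelled out only to show what the assertion waits for:

import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.common.Priority;

// Approximately what ensureStableCluster(2, viaNode) waits for: the health,
// as observed through viaNode, reports two nodes and no pending cluster
// state events.
ClusterHealthResponse health = client(viaNode).admin().cluster().prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForNodes("2")
        .get();
assertFalse(health.isTimedOut());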
--- .../ingest/IngestProcessorNotInstalledOnAllNodesIT.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index abfe18f8c58..a415b0992a7 100644 --- a/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/core/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -67,9 +67,11 @@ public class IngestProcessorNotInstalledOnAllNodesIT extends ESIntegTestCase { public void testFailPipelineCreation() throws Exception { installPlugin = true; - internalCluster().startNode(); + String node1 = internalCluster().startNode(); installPlugin = false; - internalCluster().startNode(); + String node2 = internalCluster().startNode(); + ensureStableCluster(2, node1); + ensureStableCluster(2, node2); try { client().admin().cluster().preparePutPipeline("_id", pipelineSource).get(); From f8ab6a6669c6c364ffff3141cee44208d9d8c09e Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Mar 2016 11:43:18 +0100 Subject: [PATCH 13/37] [TEST] Make boost more prominent in test since with the new default similarity it might score lower without the boost --- .../java/org/elasticsearch/search/query/MultiMatchQueryIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index e0bc26c9296..23e2592447b 100644 --- a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -567,7 +567,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase { // test if boosts work searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").field("last_name", 2) + .setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").field("last_name", 10) .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) .operator(Operator.AND))).get(); assertFirstHit(searchResponse, hasId("ultimate1")); // has ultimate in the last_name and that is boosted From 7a53a396e43341d8a8b552e17706d6e996ea7dd2 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 9 Mar 2016 12:10:47 +0100 Subject: [PATCH 14/37] Remove Unneeded @Inject annotations --- .../index/analysis/IcuTransformTokenFilterFactory.java | 2 -- .../elasticsearch/index/analysis/KuromojiAnalyzerProvider.java | 2 -- .../index/analysis/KuromojiBaseFormFilterFactory.java | 2 -- .../index/analysis/KuromojiKatakanaStemmerFactory.java | 2 -- .../index/analysis/KuromojiReadingFormFilterFactory.java | 2 -- .../index/analysis/PhoneticTokenFilterFactory.java | 2 -- .../index/analysis/SmartChineseAnalyzerProvider.java | 2 -- 7 files changed, 14 deletions(-) diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java index 6ecdf3888e9..f145ad4ae30 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTransformTokenFilterFactory.java @@ -22,7 +22,6 @@
package org.elasticsearch.index.analysis; import com.ibm.icu.text.Transliterator; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.icu.ICUTransformFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -36,7 +35,6 @@ public class IcuTransformTokenFilterFactory extends AbstractTokenFilterFactory { private final int dir; private final Transliterator transliterator; - @Inject public IcuTransformTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.id = settings.get("id", "Null"); diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java index 8aa8ff3c1dd..21d9b804055 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiAnalyzerProvider.java @@ -23,7 +23,6 @@ import org.apache.lucene.analysis.ja.JapaneseAnalyzer; import org.apache.lucene.analysis.ja.JapaneseTokenizer; import org.apache.lucene.analysis.ja.dict.UserDictionary; import org.apache.lucene.analysis.util.CharArraySet; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -36,7 +35,6 @@ public class KuromojiAnalyzerProvider extends AbstractIndexAnalyzerProvider stopWords = Analysis.parseStopWords(env, settings, JapaneseAnalyzer.getDefaultStopSet()); diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiBaseFormFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiBaseFormFilterFactory.java index e191d78198f..aa035d9edfd 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiBaseFormFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiBaseFormFilterFactory.java @@ -21,14 +21,12 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ja.JapaneseBaseFormFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; public class KuromojiBaseFormFilterFactory extends AbstractTokenFilterFactory { - @Inject public KuromojiBaseFormFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); } diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiKatakanaStemmerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiKatakanaStemmerFactory.java index ebebdcb6bba..491f48e34c1 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiKatakanaStemmerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiKatakanaStemmerFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import 
org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ja.JapaneseKatakanaStemFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -30,7 +29,6 @@ public class KuromojiKatakanaStemmerFactory extends AbstractTokenFilterFactory { private final int minimumLength; - @Inject public KuromojiKatakanaStemmerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); minimumLength = settings.getAsInt("minimum_length", JapaneseKatakanaStemFilter.DEFAULT_MINIMUM_LENGTH); diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java index 59d1088fd1b..d0eb0cecdb9 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiReadingFormFilterFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ja.JapaneseReadingFormFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -30,7 +29,6 @@ public class KuromojiReadingFormFilterFactory extends AbstractTokenFilterFactory private final boolean useRomaji; - @Inject public KuromojiReadingFormFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); useRomaji = settings.getAsBoolean("use_romaji", false); diff --git a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java index e33f1f1e7e2..75da19c0a3c 100644 --- a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java +++ b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java @@ -38,7 +38,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.phonetic.BeiderMorseFilter; import org.apache.lucene.analysis.phonetic.DoubleMetaphoneFilter; import org.apache.lucene.analysis.phonetic.PhoneticFilter; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -58,7 +57,6 @@ public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory { private NameType nametype; private RuleType ruletype; - @Inject public PhoneticTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.languageset = null; diff --git a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java index 22fcf238725..591912b8fa3 100644 --- 
a/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java +++ b/plugins/analysis-smartcn/src/main/java/org/elasticsearch/index/analysis/SmartChineseAnalyzerProvider.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -31,7 +30,6 @@ public class SmartChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider< private final SmartChineseAnalyzer analyzer; - @Inject public SmartChineseAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); From 11b18a996378bd83065fdc835c7dee9921c44aa0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 8 Mar 2016 15:06:10 +0100 Subject: [PATCH 15/37] Sort: Make ScoreSortBuilder implement NamedWriteable and add fromXContent parsing This change makes ScoreSortBuilder implement NamedWriteable, adds equals() and hashCode() and also implements parsing ScoreSortBuilder back from xContent. This is needed for the ongoing Search refactoring. --- .../search/sort/ScoreSortBuilder.java | 93 +++++++++++++++++-- .../search/sort/SortElementParserTemp.java | 3 +- .../search/sort/AbstractSortTestCase.java | 21 ++--- .../sort/GeoDistanceSortBuilderTests.java | 72 +++++++------- .../search/sort/ScoreSortBuilderTests.java | 82 ++++++++++++++++ 5 files changed, 213 insertions(+), 58 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java index e70a34a7c3b..5d1a0d82987 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java @@ -19,35 +19,116 @@ package org.elasticsearch.search.sort; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryParseContext; import java.io.IOException; +import java.util.Objects; /** * A sort builder allowing to sort by score. - * - * */ -public class ScoreSortBuilder extends SortBuilder { +public class ScoreSortBuilder extends SortBuilder implements NamedWriteable, + SortElementParserTemp { - private SortOrder order; + private static final String NAME = "_score"; + static final ScoreSortBuilder PROTOTYPE = new ScoreSortBuilder(); + public static final ParseField REVERSE_FIELD = new ParseField("reverse"); + public static final ParseField ORDER_FIELD = new ParseField("order"); + private SortOrder order = SortOrder.DESC; /** * The order of sort scoring. By default, its {@link SortOrder#DESC}. */ @Override public ScoreSortBuilder order(SortOrder order) { + Objects.requireNonNull(order, "sort order cannot be null."); this.order = order; return this; } + /** + * Get the order of sort scoring. 
By default, its {@link SortOrder#DESC}. + */ + public SortOrder order() { + return this.order; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject("_score"); + builder.startObject(NAME); if (order == SortOrder.ASC) { - builder.field("reverse", true); + builder.field(REVERSE_FIELD.getPreferredName(), true); } builder.endObject(); return builder; } + + @Override + public ScoreSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException { + XContentParser parser = context.parser(); + ParseFieldMatcher matcher = context.parseFieldMatcher(); + + XContentParser.Token token; + String currentName = parser.currentName(); + ScoreSortBuilder result = new ScoreSortBuilder(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentName = parser.currentName(); + } else if (token.isValue()) { + if (matcher.match(currentName, REVERSE_FIELD)) { + if (parser.booleanValue()) { + result.order(SortOrder.ASC); + } + // else we keep the default DESC + } else if (matcher.match(currentName, ORDER_FIELD)) { + result.order(SortOrder.fromString(parser.text())); + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] unexpected token [" + token + "]"); + } + } + return result; + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + ScoreSortBuilder other = (ScoreSortBuilder) object; + return Objects.equals(order, other.order); + } + + @Override + public int hashCode() { + return Objects.hash(this.order); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + order.writeTo(out); + } + + @Override + public ScoreSortBuilder readFrom(StreamInput in) throws IOException { + return new ScoreSortBuilder().order(SortOrder.readOrderFrom(in)); + } + + @Override + public String getWriteableName() { + return NAME; + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java b/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java index 8893471b6c1..069f1380b49 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java @@ -19,13 +19,12 @@ package org.elasticsearch.search.sort; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.index.query.QueryParseContext; import java.io.IOException; // TODO once sort refactoring is done this needs to be merged into SortBuilder -public interface SortElementParserTemp { +public interface SortElementParserTemp { /** * Creates a new SortBuilder from the json held by the {@link SortElementParserTemp} * in {@link org.elasticsearch.common.xcontent.XContent} format diff --git a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java index dfea1a9316b..dc61f0ef34c 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java @@ -43,7 +43,7 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; import 
static org.hamcrest.Matchers.not; -public abstract class AbstractSortTestCase & ToXContent & SortElementParserTemp> extends ESTestCase { +public abstract class AbstractSortTestCase & SortElementParserTemp> extends ESTestCase { protected static NamedWriteableRegistry namedWriteableRegistry; @@ -53,7 +53,8 @@ public abstract class AbstractSortTestCase & ToXCont @BeforeClass public static void init() { namedWriteableRegistry = new NamedWriteableRegistry(); - namedWriteableRegistry.registerPrototype(GeoDistanceSortBuilder.class, GeoDistanceSortBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SortBuilder.class, GeoDistanceSortBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(SortBuilder.class, ScoreSortBuilder.PROTOTYPE); indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).buildQueryParserRegistry(); } @@ -85,9 +86,9 @@ public abstract class AbstractSortTestCase & ToXCont XContentParser itemParser = XContentHelper.createParser(builder.bytes()); itemParser.nextToken(); - + /* - * filter out name of sort, or field name to sort on for element fieldSort + * filter out name of sort, or field name to sort on for element fieldSort */ itemParser.nextToken(); String elementName = itemParser.currentName(); @@ -95,7 +96,7 @@ public abstract class AbstractSortTestCase & ToXCont QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); context.reset(itemParser); - NamedWriteable parsedItem = testItem.fromXContent(context, elementName); + SortBuilder parsedItem = testItem.fromXContent(context, elementName); assertNotSame(testItem, parsedItem); assertEquals(testItem, parsedItem); assertEquals(testItem.hashCode(), parsedItem.hashCode()); @@ -146,17 +147,15 @@ public abstract class AbstractSortTestCase & ToXCont } } + @SuppressWarnings("unchecked") protected T copyItem(T original) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { original.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { - @SuppressWarnings("unchecked") - T prototype = (T) namedWriteableRegistry.getPrototype(getPrototype(), original.getWriteableName()); - T copy = (T) prototype.readFrom(in); - return copy; + T prototype = (T) namedWriteableRegistry.getPrototype(SortBuilder.class, + original.getWriteableName()); + return prototype.readFrom(in); } } } - - protected abstract Class getPrototype(); } diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java index e957db58b38..611053b14d5 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java @@ -60,7 +60,7 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase getPrototype() { - return (Class) GeoDistanceSortBuilder.PROTOTYPE.getClass(); - } - public void testSortModeSumIsRejectedInSetter() { GeoDistanceSortBuilder builder = new GeoDistanceSortBuilder("testname", -1, -1); GeoPoint point = RandomGeoGenerator.randomPoint(getRandom()); @@ -189,23 +183,23 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase { + + @Override + protected ScoreSortBuilder createTestItem() { + return new ScoreSortBuilder().order(randomBoolean() ? 
SortOrder.ASC : SortOrder.DESC); + } + + @Override + protected ScoreSortBuilder mutate(ScoreSortBuilder original) throws IOException { + ScoreSortBuilder result = new ScoreSortBuilder(); + if (original.order() == SortOrder.ASC) { + result.order(SortOrder.DESC); + } else { + result.order(SortOrder.ASC); + } + return result; + } + + @Rule + public ExpectedException exceptionRule = ExpectedException.none(); + + /** + * test passing null to {@link ScoreSortBuilder#order(SortOrder)} is illegal + */ + public void testIllegalOrder() { + exceptionRule.expect(NullPointerException.class); + exceptionRule.expectMessage("sort order cannot be null."); + new ScoreSortBuilder().order(null); + } + + /** + * test parsing order parameter if specified as `order` field in the json + * instead of the `reverse` field that we render in toXContent + */ + public void testParseOrder() throws IOException { + QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); + context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); + SortOrder order = randomBoolean() ? SortOrder.ASC : SortOrder.DESC; + String scoreSortString = "{ \"_score\": { \"order\": \""+ order.toString() +"\" }}"; + XContentParser parser = XContentFactory.xContent(scoreSortString).createParser(scoreSortString); + // need to skip until parser is located on second START_OBJECT + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + + context.reset(parser); + ScoreSortBuilder scoreSort = ScoreSortBuilder.PROTOTYPE.fromXContent(context, "_score"); + assertEquals(order, scoreSort.order()); + } +} From 06929f8ed42b74cf490533448a90741a4a8e1bd8 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Wed, 9 Mar 2016 15:32:54 +0100 Subject: [PATCH 16/37] Merge pull request #17030 from 36degrees/patch-1 Fix typo in clear cache documentation --- docs/reference/indices/clearcache.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc index 21008e5b46b..8ebb9e3488a 100644 --- a/docs/reference/indices/clearcache.asciidoc +++ b/docs/reference/indices/clearcache.asciidoc @@ -2,7 +2,7 @@ == Clear Cache The clear cache API allows to clear either all caches or specific cached -associated with one ore more indices. +associated with one or more indices. [source,js] -------------------------------------------------- From e411cbb0600ad119dacfbd214bba994f4f2aa4c0 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Tue, 1 Mar 2016 18:10:46 -0500 Subject: [PATCH 17/37] Fixes the DiscoveryWithServiceDisruptionsIT#testIndicesDeleted test In particular, this test ensures we don't restart the master node until we know the index deletion has taken effect on master and the master eligible nodes. 
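In essence, the test now polls until every master-eligible node has applied a cluster state in which the index is gone, and only then restarts the old master. As a rough, self-contained sketch of that waiting pattern (the actual test uses the framework's assertBusy helper, shown in the diff below; the class name, the masterEligibleNodes list and the indexIsDeletedOn predicate here are illustrative, not part of the change):

import java.util.List;
import java.util.function.Predicate;

final class AwaitDeletion {
    // Poll every master-eligible node until each has applied the index deletion,
    // failing if any node does not catch up within the timeout. Checking nodes
    // one after another is safe because a deletion, once applied, stays applied.
    static void await(List<String> masterEligibleNodes, Predicate<String> indexIsDeletedOn,
                      long timeoutMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        for (String node : masterEligibleNodes) {
            while (indexIsDeletedOn.test(node) == false) {
                if (System.currentTimeMillis() > deadline) {
                    throw new AssertionError("index not deleted on " + node);
                }
                Thread.sleep(100); // back off before re-checking this node
            }
        }
    }
}

Only once this wait succeeds is it safe to restart the old master and assert the index stays deleted.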
Closes #16917 Closes #16890 --- .../DiscoveryWithServiceDisruptionsIT.java | 34 ++++++++++++++----- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index b9d7107ed54..3948a4bab90 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -177,13 +177,17 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } private void configureUnicastCluster(int numberOfNodes, @Nullable int[] unicastHostsOrdinals, int minimumMasterNode) throws ExecutionException, InterruptedException { + configureUnicastCluster(DEFAULT_SETTINGS, numberOfNodes, unicastHostsOrdinals, minimumMasterNode); + } + + private void configureUnicastCluster(Settings settings, int numberOfNodes, @Nullable int[] unicastHostsOrdinals, int minimumMasterNode) throws ExecutionException, InterruptedException { if (minimumMasterNode < 0) { minimumMasterNode = numberOfNodes / 2 + 1; } logger.info("---> configured unicast"); // TODO: Rarely use default settings form some of these Settings nodeSettings = Settings.builder() - .put(DEFAULT_SETTINGS) + .put(settings) .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNode) .build(); @@ -196,7 +200,6 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } } - /** * Test that no split brain occurs under partial network partition. See https://github.com/elastic/elasticsearch/issues/2488 */ @@ -1075,25 +1078,40 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { * Tests that indices are properly deleted even if there is a master transition in between. * Test for https://github.com/elastic/elasticsearch/issues/11665 */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/16890") public void testIndicesDeleted() throws Exception { - configureUnicastCluster(3, null, 2); + final Settings settings = Settings.builder() + .put(DEFAULT_SETTINGS) + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0s") // don't wait on isolated data node + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed + .build(); + final String idxName = "test"; + configureUnicastCluster(settings, 3, null, 2); InternalTestCluster.Async> masterNodes = internalCluster().startMasterOnlyNodesAsync(2); InternalTestCluster.Async dataNode = internalCluster().startDataOnlyNodeAsync(); dataNode.get(); - masterNodes.get(); + final List allMasterEligibleNodes = masterNodes.get(); ensureStableCluster(3); assertAcked(prepareCreate("test")); ensureYellow(); - String masterNode1 = internalCluster().getMasterName(); + final String masterNode1 = internalCluster().getMasterName(); NetworkPartition networkPartition = new NetworkUnresponsivePartition(masterNode1, dataNode.get(), getRandom()); internalCluster().setDisruptionScheme(networkPartition); networkPartition.startDisrupting(); - internalCluster().client(masterNode1).admin().indices().prepareDelete("test").setTimeout("1s").get(); + // We know this will time out due to the partition, we check manually below to not proceed until + // the delete has been applied to the master node and the master eligible node. 
+ internalCluster().client(masterNode1).admin().indices().prepareDelete(idxName).setTimeout("0s").get(); + // Don't restart the master node until we know the index deletion has taken effect on master and the master eligible node. + assertBusy(() -> { + for (String masterNode : allMasterEligibleNodes) { + final ClusterState masterState = internalCluster().clusterService(masterNode).state(); + assertTrue("index not deleted on " + masterNode, masterState.metaData().hasIndex(idxName) == false && + masterState.status() == ClusterState.ClusterStateStatus.APPLIED); + } + }); internalCluster().restartNode(masterNode1, InternalTestCluster.EMPTY_CALLBACK); ensureYellow(); - assertFalse(client().admin().indices().prepareExists("test").get().isExists()); + assertFalse(client().admin().indices().prepareExists(idxName).get().isExists()); } protected NetworkPartition addRandomPartition() { From d09ee3f174666620007f804fa5985e0e6ba5879c Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Fri, 8 Jan 2016 22:32:41 -0600 Subject: [PATCH 18/37] Remove .geohash suffix from GeoDistanceQuery and GeoDistanceRangeQuery Occasionally the .geohash suffix in Geo{Distance|DistanceRange}Query would conflict with a mapping that defines a sub-field by the same name. This occurs often with nested and multi-fields where a mapping defines a geo_point sub-field using the field name "geohash". Since the QueryParser already handles parsing geohash encoded geopoints without requiring the ".geohash" suffix, the suffix parsing can be removed altogether. This commit removes the .geohash suffix parsing, adds explicit test coverage for the nested query use-case, and adds random distance queries to the nested query test suite. --- .../mapper/geo/BaseGeoPointFieldMapper.java | 1 - .../index/query/GeoDistanceQueryParser.java | 3 -- .../query/GeoDistanceRangeQueryParser.java | 9 ------ .../index/query/AbstractQueryTestCase.java | 3 +- .../query/GeoDistanceRangeQueryTests.java | 32 +++++++++++++++++++ .../query/GeohashCellQueryBuilderTests.java | 2 +- .../index/query/NestedQueryBuilderTests.java | 1 + 7 files changed, 36 insertions(+), 15 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java index 5e617dd6815..f72533d30cf 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java @@ -65,7 +65,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr public static final String LON = "lon"; public static final String LON_SUFFIX = "." + LON; public static final String GEOHASH = "geohash"; - public static final String GEOHASH_SUFFIX = "."
+ GEOHASH; public static final String IGNORE_MALFORMED = "ignore_malformed"; } diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryParser.java index c35a31f8d84..3828f786903 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryParser.java @@ -120,9 +120,6 @@ public class GeoDistanceQueryParser implements QueryParser> protected static final String DATE_FIELD_NAME = "mapped_date"; protected static final String OBJECT_FIELD_NAME = "mapped_object"; protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point"; + protected static final String GEO_POINT_FIELD_MAPPING = "type=geo_point,lat_lon=true,geohash=true,geohash_prefix=true"; protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape"; protected static final String[] MAPPED_FIELD_NAMES = new String[] { STRING_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, GEO_SHAPE_FIELD_NAME }; @@ -300,7 +301,7 @@ public abstract class AbstractQueryTestCase> BOOLEAN_FIELD_NAME, "type=boolean", DATE_FIELD_NAME, "type=date", OBJECT_FIELD_NAME, "type=object", - GEO_POINT_FIELD_NAME, "type=geo_point,lat_lon=true,geohash=true,geohash_prefix=true", + GEO_POINT_FIELD_NAME, GEO_POINT_FIELD_MAPPING, GEO_SHAPE_FIELD_NAME, "type=geo_shape" ).string()), MapperService.MergeReason.MAPPING_UPDATE, false); // also add mappings for two inner field in the object field diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java index f07e695a1a0..cb0c374c5c0 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceRangeQueryTests.java @@ -24,10 +24,12 @@ import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceRangeQuery; import org.apache.lucene.spatial.util.GeoDistanceUtils; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.Version; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.unit.DistanceUnit; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery; import org.elasticsearch.test.geo.RandomGeoGenerator; @@ -296,6 +298,36 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase assertThat(query, instanceOf(TermQuery.class)); TermQuery termQuery = (TermQuery) query; Term term = termQuery.getTerm(); - assertThat(term.field(), equalTo(queryBuilder.fieldName() + GeoPointFieldMapper.Names.GEOHASH_SUFFIX)); + assertThat(term.field(), equalTo(queryBuilder.fieldName() + "." 
+ GeoPointFieldMapper.Names.GEOHASH)); String geohash = queryBuilder.geohash(); if (queryBuilder.precision() != null) { int len = Math.min(queryBuilder.precision(), geohash.length()); diff --git a/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index 0f7e2e67e38..beef2df15d4 100644 --- a/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -52,6 +52,7 @@ public class NestedQueryBuilderTests extends AbstractQueryTestCase Date: Wed, 9 Mar 2016 19:43:56 +0300 Subject: [PATCH 19/37] Don't return all indices immediately if count of expressions >1 and first expression is * #17027 --- .../metadata/IndexNameExpressionResolver.java | 2 +- .../metadata/WildcardExpressionResolverTests.java | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index cca633a7651..9bd4ba6112b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -686,7 +686,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { } private boolean isEmptyOrTrivialWildcard(List expressions) { - return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0))) || Regex.isMatchAllPattern(expressions.get(0))); + return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0)))); } private List resolveEmptyOrTrivialWildcard(IndicesOptions options, MetaData metaData, boolean assertEmpty) { diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java index d9cf9f0d790..744477d6722 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -47,6 +47,8 @@ public class WildcardExpressionResolverTests extends ESTestCase { assertThat(newHashSet(resolver.resolve(context, Arrays.asList("test*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*"))), equalTo(newHashSet("testXXX", "testXYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), equalTo(newHashSet("testXXX", "testXYY", "kuku"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); } public void testConvertWildcardsTests() { @@ -107,6 +109,18 @@ public class WildcardExpressionResolverTests extends ESTestCase { assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*Y*X"))).size(), equalTo(0)); } + public void testAll() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("testXXX")) + .put(indexBuilder("testXYY")) + .put(indexBuilder("testYYY")); + ClusterState state = 
ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); + + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("_all"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + } + private IndexMetaData.Builder indexBuilder(String index) { return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); } From e72dac91b3c63e92c53d2beb8d9d4d5c234d3235 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 8 Mar 2016 10:41:17 +0100 Subject: [PATCH 20/37] Use index UUID to look up indices on IndicesService Today we use the index name to look up index instances on the IndicesService, which applies to search requests but also to index deletion etc. This commit moves the interface to expect an `Index` instance, which is a (name, UUID) tuple, and looks up the index by UUID rather than by name. This prevents accidental modification of the wrong index if an index is recreated, and searching the _wrong_ index in such a case. Accessing an index that has the same name but a different UUID will now result in an IndexNotFoundException. Closes #17001 --- .../TransportClearIndicesCacheAction.java | 2 +- .../TransportIndicesSegmentsAction.java | 2 +- .../action/bulk/TransportShardBulkAction.java | 6 +- .../action/search/ShardSearchFailure.java | 6 +- .../suggest/TransportSuggestAction.java | 2 +- .../TransportReplicationAction.java | 7 +- .../InstanceShardOperationRequest.java | 13 ++- ...ransportInstanceSingleOperationAction.java | 2 +- .../TransportShardMultiTermsVectorAction.java | 6 +- .../action/update/TransportUpdateAction.java | 16 ++-- .../action/update/UpdateRequest.java | 5 +- .../cluster/ClusterChangedEvent.java | 16 ++-- .../metadata/MetaDataCreateIndexService.java | 12 +-- .../metadata/MetaDataIndexAliasesService.java | 7 +- .../metadata/MetaDataMappingService.java | 73 ++++++++------- .../elasticsearch/indices/IndicesService.java | 88 ++++++++----------- .../cluster/IndicesClusterStateService.java | 37 ++++---- .../indices/recovery/RecoverySource.java | 2 +- .../indices/store/IndicesStore.java | 2 +- .../TransportNodesListShardStoreMetaData.java | 4 +- .../elasticsearch/search/SearchException.java | 9 +- .../elasticsearch/search/SearchService.java | 7 +- .../search/SearchShardTarget.java | 52 +++++------ .../controller/SearchPhaseController.java | 2 +- .../fetch/ScrollQueryFetchSearchResult.java | 3 +- .../search/internal/InternalSearchHit.java | 3 +- .../search/internal/InternalSearchHits.java | 5 +- .../internal/ShardSearchLocalRequest.java | 18 ++-- .../search/internal/ShardSearchRequest.java | 5 +- .../internal/ShardSearchTransportRequest.java | 7 +- .../search/query/ScrollQuerySearchResult.java | 3 +- .../search/suggest/SuggestParseElement.java | 8 +- .../suggest/SuggestionSearchContext.java | 24 ++--- .../suggest/phrase/PhraseSuggester.java | 2 +- .../shards/IndicesShardStoreRequestIT.java | 5 +- ...ortInstanceSingleOperationActionTests.java | 14 +-- .../cluster/ClusterChangedEventTests.java | 4 +- .../index/IndexWithShadowReplicasIT.java | 10 ++- .../query/plugin/CustomQueryParserIT.java | 2 +- .../index/shard/IndexShardTests.java | 56 ++++++------ .../IndexingMemoryControllerTests.java | 10 +--
...dicesLifecycleListenerSingleNodeTests.java | 10 ++- .../indices/IndicesServiceTests.java | 8 +- .../flush/SyncedFlushSingleNodeTests.java | 12 +-- .../indices/recovery/IndexRecoveryIT.java | 7 +- .../indices/settings/UpdateSettingsIT.java | 4 +- .../indices/state/RareClusterStateIT.java | 4 +- .../store/IndicesStoreIntegrationIT.java | 3 +- .../recovery/RecoveriesCollectionTests.java | 2 +- .../search/child/ParentFieldLoadingIT.java | 4 +- .../messy/tests/GeoShapeIntegrationTests.java | 4 +- .../elasticsearch/test/ESIntegTestCase.java | 13 ++- .../test/ESSingleNodeTestCase.java | 11 ++- .../test/InternalTestCluster.java | 5 +- 54 files changed, 334 insertions(+), 310 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index bc229d72b1b..7bc9f50252a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -77,7 +77,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc @Override protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) { - IndexService service = indicesService.indexService(shardRouting.getIndexName()); + IndexService service = indicesService.indexService(shardRouting.index()); if (service != null) { IndexShard shard = service.getShardOrNull(shardRouting.id()); boolean clearedAtLeastOne = false; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java index fd45e22a171..f700a198e2c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java @@ -93,7 +93,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi @Override protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting shardRouting) { - IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndexName()); + IndexService indexService = indicesService.indexServiceSafe(shardRouting.index()); IndexShard indexShard = indexService.getShard(shardRouting.id()); return new ShardSegments(indexShard.routingEntry(), indexShard.segments(request.verbose())); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 30f6b03a116..f1eeae35e08 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -47,6 +47,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; @@ -104,8 +105,9 @@ public class TransportShardBulkAction extends 
TransportReplicationAction shardOperationOnPrimary(MetaData metaData, BulkShardRequest request) { - final IndexService indexService = indicesService.indexServiceSafe(request.index()); - final IndexShard indexShard = indexService.getShard(request.shardId().id()); + ShardId shardId = request.shardId(); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShard(shardId.getId()); long[] preVersions = new long[request.items().length]; VersionType[] preVersionTypes = new VersionType[request.items().length]; diff --git a/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java index 0139186562c..2a01eb4e1c6 100644 --- a/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java +++ b/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java @@ -32,8 +32,6 @@ import org.elasticsearch.search.SearchShardTarget; import java.io.IOException; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; - /** * Represents a failure to search on a specific shard. */ @@ -106,7 +104,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { @Override public int shardId() { if (shardTarget != null) { - return shardTarget.shardId(); + return shardTarget.shardId().id(); } return -1; } @@ -133,7 +131,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { @Override public void readFrom(StreamInput in) throws IOException { if (in.readBoolean()) { - shardTarget = readSearchShardTarget(in); + shardTarget = new SearchShardTarget(in); } reason = in.readString(); status = RestStatus.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java b/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java index 0ed98578557..7a354060774 100644 --- a/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java +++ b/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java @@ -143,7 +143,7 @@ public class TransportSuggestAction extends TransportBroadcastAction() { diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java index ccdf934958d..94b0e745a8e 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java @@ -75,12 +75,12 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc @Override protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, ShardId shardId) { - MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse(); + final MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse(); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShard(shardId.id()); for (int i = 0; i < request.locations.size(); i++) { TermVectorsRequest termVectorsRequest = request.requests.get(i); try { - IndexService indexService = indicesService.indexServiceSafe(request.index()); - IndexShard indexShard = indexService.getShard(shardId.id()); TermVectorsResponse 
termVectorsResponse = TermVectorsService.getTermVectors(indexShard, termVectorsRequest); termVectorsResponse.updateTookInMillis(termVectorsRequest.startTime()); response.add(request.locations.get(i), termVectorsResponse); diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 0aefa825f2a..75feeb8fbca 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -51,6 +51,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; @@ -147,8 +148,8 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio @Override protected ShardIterator shards(ClusterState clusterState, UpdateRequest request) { - if (request.shardId() != -1) { - return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId()).primaryShardIt(); + if (request.getShardId() != null) { + return clusterState.routingTable().index(request.concreteIndex()).shard(request.getShardId().getId()).primaryShardIt(); } ShardIterator shardIterator = clusterService.operationRouting() .indexShards(clusterState, request.concreteIndex(), request.type(), request.id(), request.routing()); @@ -167,8 +168,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio } protected void shardOperation(final UpdateRequest request, final ActionListener listener, final int retryCount) { - final IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex()); - final IndexShard indexShard = indexService.getShard(request.shardId()); + final ShardId shardId = request.getShardId(); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShard(shardId.getId()); final UpdateHelper.Result result = updateHelper.prepare(request, indexShard); switch (result.operation()) { case UPSERT: @@ -194,7 +196,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio if (e instanceof VersionConflictEngineException) { if (retryCount < request.retryOnConflict()) { logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]", - retryCount + 1, request.retryOnConflict(), request.index(), request.shardId(), request.id()); + retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id()); threadPool.executor(executor()).execute(new ActionRunnable(listener) { @Override protected void doRun() { @@ -267,9 +269,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio break; case NONE: UpdateResponse update = result.action(); - IndexService indexServiceOrNull = indicesService.indexService(request.concreteIndex()); + IndexService indexServiceOrNull = indicesService.indexService(shardId.getIndex()); if (indexServiceOrNull != null) { - IndexShard shard = indexService.getShardOrNull(request.shardId()); + IndexShard shard = indexService.getShardOrNull(shardId.getId()); if (shard != null) { 
shard.noopUpdate(request.type()); } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 6bc69ed4d9c..14c127c0703 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptParameterParser; import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; @@ -88,7 +89,7 @@ public class UpdateRequest extends InstanceShardOperationRequest } public UpdateRequest(String index, String type, String id) { - this.index = index; + super(index); this.type = type; this.id = id; } @@ -195,7 +196,7 @@ public class UpdateRequest extends InstanceShardOperationRequest return parent; } - int shardId() { + public ShardId getShardId() { return this.shardId; } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index e851b7814da..c8a7924ba0f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.index.Index; import java.util.ArrayList; import java.util.Collections; @@ -120,7 +121,7 @@ public class ClusterChangedEvent { /** * Returns the indices deleted in this event */ - public List indicesDeleted() { + public List indicesDeleted() { // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected // master that has had its data directory wiped out, in which case we don't want to delete the indices and lose data; // rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous @@ -131,17 +132,18 @@ public class ClusterChangedEvent { if (metaDataChanged() == false || isNewCluster()) { return Collections.emptyList(); } - List deleted = null; - for (ObjectCursor cursor : previousState.metaData().indices().keys()) { - String index = cursor.value; - if (!state.metaData().hasIndex(index)) { + List deleted = null; + for (ObjectCursor cursor : previousState.metaData().indices().values()) { + IndexMetaData index = cursor.value; + IndexMetaData current = state.metaData().index(index.getIndex().getName()); + if (current == null || index.getIndexUUID().equals(current.getIndexUUID()) == false) { if (deleted == null) { deleted = new ArrayList<>(); } - deleted.add(index); + deleted.add(index.getIndex()); } } - return deleted == null ? Collections.emptyList() : deleted; + return deleted == null ? 
Collections.emptyList() : deleted; } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 62f3ad802a0..177c46e5537 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -53,6 +53,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; @@ -188,7 +189,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { @Override public ClusterState execute(ClusterState currentState) throws Exception { - boolean indexCreated = false; + Index createdIndex = null; String removalReason = null; try { validate(request, currentState); @@ -308,10 +309,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { // Set up everything, now locally create the index to see that things are ok, and apply final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build(); // create the index here (on the master) to validate it can be created, as well as adding the mapping - indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList()); - indexCreated = true; + final IndexService indexService = indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList()); + createdIndex = indexService.index(); // now add the mappings - IndexService indexService = indicesService.indexServiceSafe(request.index()); MapperService mapperService = indexService.mapperService(); // first, add the default mapping if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) { @@ -415,9 +415,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { removalReason = "cleaning up after validating index on master"; return updatedState; } finally { - if (indexCreated) { + if (createdIndex != null) { // Index was already partially created - need to clean up - indicesService.removeIndex(request.index(), removalReason != null ? removalReason : "failed to create index"); + indicesService.removeIndex(createdIndex, removalReason != null ? 
removalReason : "failed to create index"); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 52154bd2c04..1f0eaf0cda0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; @@ -74,7 +75,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { @Override public ClusterState execute(final ClusterState currentState) { - List indicesToClose = new ArrayList<>(); + List indicesToClose = new ArrayList<>(); Map indices = new HashMap<>(); try { for (AliasAction aliasAction : request.actions()) { @@ -112,7 +113,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.getIndex()); continue; } - indicesToClose.add(indexMetaData.getIndex().getName()); + indicesToClose.add(indexMetaData.getIndex()); } indices.put(indexMetaData.getIndex().getName(), indexService); } @@ -153,7 +154,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { } return currentState; } finally { - for (String index : indicesToClose) { + for (Index index : indicesToClose) { indicesService.removeIndex(index, "created for alias processing"); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index c06a5cc7c1c..51095a2d0de 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; @@ -112,13 +113,13 @@ public class MetaDataMappingService extends AbstractComponent { MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); for (Map.Entry> entry : tasksPerIndex.entrySet()) { - String index = entry.getKey(); - IndexMetaData indexMetaData = mdBuilder.get(index); + IndexMetaData indexMetaData = mdBuilder.get(entry.getKey()); if (indexMetaData == null) { // index got deleted on us, ignore... 
- logger.debug("[{}] ignoring tasks - index meta data doesn't exist", index); + logger.debug("[{}] ignoring tasks - index meta data doesn't exist", entry.getKey()); continue; } + final Index index = indexMetaData.getIndex(); // the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep // the latest (based on order) update mapping one per node List allIndexTasks = entry.getValue(); @@ -127,7 +128,7 @@ public class MetaDataMappingService extends AbstractComponent { if (indexMetaData.isSameUUID(task.indexUUID)) { hasTaskWithRightUUID = true; } else { - logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task); + logger.debug("{} ignoring task [{}] - index meta data doesn't match task uuid", index, task); } } if (hasTaskWithRightUUID == false) { @@ -136,7 +137,7 @@ public class MetaDataMappingService extends AbstractComponent { // construct the actual index if needed, and make sure the relevant mappings are there boolean removeIndex = false; - IndexService indexService = indicesService.indexService(index); + IndexService indexService = indicesService.indexService(indexMetaData.getIndex()); if (indexService == null) { // we need to create the index here, and add the current mapping to it, so we can merge indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList()); @@ -208,47 +209,57 @@ public class MetaDataMappingService extends AbstractComponent { class PutMappingExecutor implements ClusterStateTaskExecutor { @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - Set indicesToClose = new HashSet<>(); + public BatchResult execute(ClusterState currentState, + List tasks) throws Exception { + Set indicesToClose = new HashSet<>(); BatchResult.Builder builder = BatchResult.builder(); try { // precreate incoming indices; for (PutMappingClusterStateUpdateRequest request : tasks) { - // failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up - for (String index : request.indices()) { - final IndexMetaData indexMetaData = currentState.metaData().index(index); - if (indexMetaData != null && indicesService.hasIndex(index) == false) { - // if we don't have the index, we will throw exceptions later; - indicesToClose.add(index); - IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList()); - // add mappings for all types, we need them for cross-type validation - for (ObjectCursor mapping : indexMetaData.getMappings().values()) { - indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes()); + final List indices = new ArrayList<>(request.indices().length); + try { + for (String index : request.indices()) { + final IndexMetaData indexMetaData = currentState.metaData().index(index); + if (indexMetaData != null) { + if (indicesService.hasIndex(indexMetaData.getIndex()) == false) { + // if the index does not exists we create it once, add all types to the mapper service and + // close it later once we are done with mapping update + indicesToClose.add(indexMetaData.getIndex()); + IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, + Collections.emptyList()); + // add mappings for all types, we need them for cross-type validation + for (ObjectCursor mapping : indexMetaData.getMappings().values()) { + 
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), + MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes()); + } + } + indices.add(indexMetaData.getIndex()); + } else { + // we didn't find the index in the clusterstate - maybe it was deleted + // NOTE: this doesn't fail the entire batch only the current PutMapping request we are processing + throw new IndexNotFoundException(index); } } - } - } - for (PutMappingClusterStateUpdateRequest request : tasks) { - try { - currentState = applyRequest(currentState, request); + currentState = applyRequest(currentState, request, indices); builder.success(request); } catch (Throwable t) { builder.failure(request, t); } } - return builder.build(currentState); } finally { - for (String index : indicesToClose) { + for (Index index : indicesToClose) { indicesService.removeIndex(index, "created for mapping processing"); } } } - private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException { + private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request, + List indices) throws IOException { String mappingType = request.type(); CompressedXContent mappingUpdateSource = new CompressedXContent(request.source()); - for (String index : request.indices()) { + final MetaData metaData = currentState.metaData(); + for (Index index : indices) { IndexService indexService = indicesService.indexServiceSafe(index); // try and parse it (no need to add it here) so we can bail early in case of parsing exception DocumentMapper newMapper; @@ -270,7 +281,7 @@ public class MetaDataMappingService extends AbstractComponent { // and a put mapping api call, so we don't which type did exist before. // Also the order of the mappings may be backwards. 
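The `PutMappingExecutor` rewrite above moves index pre-creation inside the per-request `try`, so an `IndexNotFoundException` raised for one `PutMappingClusterStateUpdateRequest` is recorded as that request's failure and the rest of the batch still applies. A schematic sketch of that per-task isolation follows; `Task`, the `String`-valued state, and the report list are simplified stand-ins for the cluster-state task types and `BatchResult.Builder`.

--------------------------------------------------
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Schematic batched executor: each task runs in its own try/catch, so a
// failure (for example, an unknown index) marks only that task as failed
// instead of aborting the whole batch.
public class BatchedExecutorSketch {

    interface Task {
        String apply(String state) throws Exception;
    }

    static String executeBatch(String state, List<Task> tasks, List<String> report) {
        for (Task task : tasks) {
            try {
                state = task.apply(state);        // may fail for this task only
                report.add("success");
            } catch (Exception e) {
                report.add("failure: " + e.getMessage());
            }
        }
        return state;                             // reflects the successful tasks
    }

    public static void main(String[] args) {
        List<String> report = new ArrayList<>();
        String state = executeBatch("initial", Arrays.asList(
                s -> s + "+mappingA",
                s -> { throw new IllegalStateException("no such index [missing]"); },
                s -> s + "+mappingB"), report);
        System.out.println(state);   // initial+mappingA+mappingB
        System.out.println(report);  // [success, failure: no such index [missing], success]
    }
}
--------------------------------------------------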
if (newMapper.parentFieldMapper().active()) { - IndexMetaData indexMetaData = currentState.metaData().index(index); + IndexMetaData indexMetaData = metaData.index(index); for (ObjectCursor mapping : indexMetaData.getMappings().values()) { if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) { throw new IllegalArgumentException("can't add a _parent field that points to an already existing type"); @@ -290,11 +301,11 @@ public class MetaDataMappingService extends AbstractComponent { if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') { throw new InvalidTypeNameException("Document mapping type name can't start with '_'"); } - MetaData.Builder builder = MetaData.builder(currentState.metaData()); - for (String index : request.indices()) { + MetaData.Builder builder = MetaData.builder(metaData); + for (Index index : indices) { // do the actual merge here on the master, and update the mapping source IndexService indexService = indicesService.indexService(index); - if (indexService == null) { + if (indexService == null) { // TODO this seems impossible given we use indexServiceSafe above continue; } @@ -326,7 +337,7 @@ public class MetaDataMappingService extends AbstractComponent { } } - IndexMetaData indexMetaData = currentState.metaData().index(index); + IndexMetaData indexMetaData = metaData.index(index); if (indexMetaData == null) { throw new IndexNotFoundException(index); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 440a11a1904..6fd833471ed 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -103,6 +103,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Predicate; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -185,14 +186,14 @@ public class IndicesService extends AbstractLifecycleComponent i ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory("indices_shutdown")); // Copy indices because we modify it asynchronously in the body of the loop - Set indices = new HashSet<>(this.indices.keySet()); + final Set indices = this.indices.values().stream().map(s -> s.index()).collect(Collectors.toSet()); final CountDownLatch latch = new CountDownLatch(indices.size()); - for (final String index : indices) { + for (final Index index : indices) { indicesStopExecutor.execute(() -> { try { removeIndex(index, "shutdown", false); } catch (Throwable e) { - logger.warn("failed to remove index on stop [" + index + "]", e); + logger.warn("failed to remove index on stop " + index + "", e); } finally { latch.countDown(); } @@ -256,7 +257,7 @@ public class IndicesService extends AbstractLifecycleComponent i } Map> statsByShard = new HashMap<>(); - for (IndexService indexService : indices.values()) { + for (IndexService indexService : this) { for (IndexShard indexShard : indexService) { try { if (indexShard.routingEntry() == null) { @@ -290,17 +291,8 @@ public class IndicesService extends AbstractLifecycleComponent i return indices.values().iterator(); } - public boolean hasIndex(String index) { - return indices.containsKey(index); - } - - /** - * Returns an 
IndexService for the specified index if exists otherwise returns null. - * - */ - @Nullable - public IndexService indexService(String index) { - return indices.get(index); + public boolean hasIndex(Index index) { + return indices.containsKey(index.getUUID()); } /** @@ -309,33 +301,21 @@ public class IndicesService extends AbstractLifecycleComponent i */ @Nullable public IndexService indexService(Index index) { - return indexService(index.getName()); - } - - /** - * Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown. - */ - public IndexService indexServiceSafe(String index) { - IndexService indexService = indexService(index); - if (indexService == null) { - throw new IndexNotFoundException(index); - } - return indexService; + return indices.get(index.getUUID()); } /** * Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown. */ public IndexService indexServiceSafe(Index index) { - IndexService indexService = indexServiceSafe(index.getName()); - if (indexService.indexUUID().equals(index.getUUID()) == false) { + IndexService indexService = indices.get(index.getUUID()); + if (indexService == null) { throw new IndexNotFoundException(index); } + assert indexService.indexUUID().equals(index.getUUID()) : "uuid mismatch local: " + indexService.indexUUID() + " incoming: " + index.getUUID(); return indexService; } - - /** * Creates a new {@link IndexService} for the given metadata. * @param indexMetaData the index metadata to create the index for @@ -346,10 +326,13 @@ public class IndicesService extends AbstractLifecycleComponent i if (!lifecycle.started()) { throw new IllegalStateException("Can't create an index [" + indexMetaData.getIndex() + "], node is closed"); } + if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) { + throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetaData.getIndexUUID() + "]"); + } final Index index = indexMetaData.getIndex(); final Predicate indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(index.getName(), indexExpression, clusterService.state()); final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexNameMatcher, indexScopeSetting); - if (indices.containsKey(index.getName())) { + if (hasIndex(index)) { throw new IndexAlreadyExistsException(index); } logger.debug("creating Index [{}], shards [{}]/[{}{}]", @@ -378,7 +361,7 @@ public class IndicesService extends AbstractLifecycleComponent i try { assert indexService.getIndexEventListener() == listener; listener.afterIndexCreated(indexService); - indices = newMapBuilder(indices).put(index.getName(), indexService).immutableMap(); + indices = newMapBuilder(indices).put(index.getUUID(), indexService).immutableMap(); success = true; return indexService; } finally { @@ -395,22 +378,24 @@ public class IndicesService extends AbstractLifecycleComponent i * @param index the index to remove * @param reason the high level reason causing this removal */ - public void removeIndex(String index, String reason) { + public void removeIndex(Index index, String reason) { removeIndex(index, reason, false); } - private void removeIndex(String index, String reason, boolean delete) { + private void removeIndex(Index index, String reason, boolean delete) { + final String indexName = index.getName(); try { final IndexService indexService; final IndexEventListener listener; synchronized (this) { - if 
(indices.containsKey(index) == false) { + if (hasIndex(index) == false) { return; } - logger.debug("[{}] closing ... (reason [{}])", index, reason); + logger.debug("[{}] closing ... (reason [{}])", indexName, reason); Map newIndices = new HashMap<>(indices); - indexService = newIndices.remove(index); + indexService = newIndices.remove(index.getUUID()); + assert indexService != null : "IndexService is null for index: " + index; indices = unmodifiableMap(newIndices); listener = indexService.getIndexEventListener(); } @@ -419,9 +404,9 @@ public class IndicesService extends AbstractLifecycleComponent i if (delete) { listener.beforeIndexDeleted(indexService); } - logger.debug("[{}] closing index service (reason [{}])", index, reason); + logger.debug("{} closing index service (reason [{}])", index, reason); indexService.close(reason, delete); - logger.debug("[{}] closed... (reason [{}])", index, reason); + logger.debug("{} closed... (reason [{}])", index, reason); listener.afterIndexClosed(indexService.index(), indexService.getIndexSettings().getSettings()); if (delete) { final IndexSettings indexSettings = indexService.getIndexSettings(); @@ -474,12 +459,12 @@ public class IndicesService extends AbstractLifecycleComponent i * Deletes the given index. Persistent parts of the index * like the shards files, state and transaction logs are removed once all resources are released. * - * Equivalent to {@link #removeIndex(String, String)} but fires + * Equivalent to {@link #removeIndex(Index, String)} but fires * different lifecycle events to ensure pending resources of this index are immediately removed. * @param index the index to delete * @param reason the high level reason causing this delete */ - public void deleteIndex(String index, String reason) throws IOException { + public void deleteIndex(Index index, String reason) throws IOException { removeIndex(index, reason, true); } @@ -505,16 +490,17 @@ public class IndicesService extends AbstractLifecycleComponent i public void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState, boolean closed) throws IOException { if (nodeEnv.hasNodeFile()) { synchronized (this) { - String indexName = metaData.getIndex().getName(); - if (indices.containsKey(indexName)) { - String localUUid = indices.get(indexName).indexUUID(); - throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]"); + Index index = metaData.getIndex(); + if (hasIndex(index)) { + String localUUid = indexService(index).indexUUID(); + throw new IllegalStateException("Can't delete index store for [" + index.getName() + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]"); } - if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) { + + if (clusterState.metaData().hasIndex(index.getName()) && (clusterState.nodes().localNode().masterNode() == true)) { // we do not delete the store if it is a master eligible node and the index is still in the cluster state // because we want to keep the meta data for indices around even if no shards are left here - final IndexMetaData index = clusterState.metaData().index(indexName); - throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); + final IndexMetaData idxMeta = 
clusterState.metaData().index(index.getName()); + throw new IllegalStateException("Can't delete closed index store for [" + index.getName() + "] - it's still part of the cluster state [" + idxMeta.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); } } final IndexSettings indexSettings = buildIndexSettings(metaData); @@ -607,7 +593,7 @@ public class IndicesService extends AbstractLifecycleComponent i * @return true if the index can be deleted on this node */ public boolean canDeleteIndexContents(Index index, IndexSettings indexSettings, boolean closed) { - final IndexService indexService = this.indices.get(index.getName()); + final IndexService indexService = indexService(index); // Closed indices may be deleted, even if they are on a shared // filesystem. Since it is closed we aren't deleting it for relocation if (indexSettings.isOnSharedFilesystem() == false || closed) { @@ -634,7 +620,7 @@ public class IndicesService extends AbstractLifecycleComponent i */ public boolean canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) { assert shardId.getIndex().equals(indexSettings.getIndex()); - final IndexService indexService = this.indices.get(shardId.getIndexName()); + final IndexService indexService = indexService(shardId.getIndex()); if (indexSettings.isOnSharedFilesystem() == false) { if (indexService != null && nodeEnv.hasNodeFile()) { return indexService.hasShard(shardId.id()) == false; diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 7998afb7656..af667f356e8 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -46,6 +46,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexShardAlreadyExistsException; @@ -157,13 +158,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent { @Override public void handle(final IndexShard.ShardFailure shardFailure) { - final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex().getName()); + final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex()); final ShardRouting shardRouting = shardFailure.routing; threadPool.generic().execute(() -> { synchronized (mutex) { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java index 9a5c23fc2e1..934730c7c93 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java @@ -83,7 +83,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe } private RecoveryResponse recover(final StartRecoveryRequest request) throws IOException { - final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex().getName()); + final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); final 
IndexShard shard = indexService.getShard(request.shardId().id()); // starting recovery from that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index d0aec817ee9..6e9859efb2e 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -348,7 +348,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe return null; } ShardId shardId = request.shardId; - IndexService indexService = indicesService.indexService(shardId.getIndexName()); + IndexService indexService = indicesService.indexService(shardId.getIndex()); if (indexService != null && indexService.indexUUID().equals(request.indexUUID)) { return indexService.getShardOrNull(shardId.id()); } diff --git a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index bcc2d7f74c4..e009cbf04d1 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -126,7 +126,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction imp } final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) { - IndexService indexService = indicesService.indexServiceSafe(request.index()); - IndexShard indexShard = indexService.getShard(request.shardId()); - - SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), indexShard.shardId().getIndex(), request.shardId()); + IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(request.shardId().getId()); + SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), indexShard.shardId()); Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher; diff --git a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java index d3958505d70..d675a93b691 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -23,28 +23,38 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; /** * The target that the search request was executed on. 
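The `SearchShardTarget` rewrite that follows switches the class from `Streamable` (a mutable instance populated by `readFrom`) to `Writeable` (a deserializing `StreamInput` constructor) and carries a full `ShardId` instead of a bare `int`. Below is a self-contained sketch of the read-constructor pattern, with `java.io.DataInput`/`DataOutput` standing in for `StreamInput`/`StreamOutput` so it runs outside Elasticsearch; `ShardAddress` is a hypothetical type, not the real class.

--------------------------------------------------
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

// Writeable-style pattern: the wire format is read back in a constructor,
// so a deserialized instance is fully initialized from the moment it exists.
final class ShardAddress {
    final String indexName;
    final int shardId;

    ShardAddress(String indexName, int shardId) {
        this.indexName = indexName;
        this.shardId = shardId;
    }

    // Deserializing constructor: the Writeable counterpart of Streamable.readFrom.
    ShardAddress(DataInput in) throws IOException {
        this.indexName = in.readUTF();
        this.shardId = in.readInt();
    }

    void writeTo(DataOutput out) throws IOException {
        out.writeUTF(indexName);
        out.writeInt(shardId);
    }
}

public class ReadConstructorDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new ShardAddress("logs", 3).writeTo(new DataOutputStream(bytes));

        ShardAddress copy = new ShardAddress(
                new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.indexName + "/" + copy.shardId); // logs/3
    }
}
--------------------------------------------------

The same shape is what lets the call sites below replace `readSearchShardTarget(in)` plus a package-private no-arg constructor with a single `new SearchShardTarget(in)`.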
*/ -public class SearchShardTarget implements Streamable, Comparable { +public class SearchShardTarget implements Writeable, Comparable { private Text nodeId; private Text index; - private int shardId; + private ShardId shardId; - private SearchShardTarget() { + public SearchShardTarget(StreamInput in) throws IOException { + if (in.readBoolean()) { + nodeId = in.readText(); + } + shardId = ShardId.readShardId(in); + index = new Text(shardId.getIndexName()); + } + public SearchShardTarget(String nodeId, ShardId shardId) { + this.nodeId = nodeId == null ? null : new Text(nodeId); + this.index = new Text(shardId.getIndexName()); + this.shardId = shardId; } public SearchShardTarget(String nodeId, Index index, int shardId) { - this.nodeId = nodeId == null ? null : new Text(nodeId); - this.index = new Text(index.getName()); - this.shardId = shardId; + this(nodeId, new ShardId(index, shardId)); } @Nullable @@ -73,36 +83,26 @@ public class SearchShardTarget implements Streamable, Comparable o1, AtomicArray.Entry o2) { int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index()); if (i == 0) { - i = o1.value.shardTarget().shardId() - o2.value.shardTarget().shardId(); + i = o1.value.shardTarget().shardId().id() - o2.value.shardTarget().shardId().id(); } return i; } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java b/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java index fb0fc75299f..dbaee5b64bb 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java @@ -26,7 +26,6 @@ import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.fetch.QueryFetchSearchResult.readQueryFetchSearchResult; /** @@ -56,7 +55,7 @@ public class ScrollQueryFetchSearchResult extends TransportResponse { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardTarget = readSearchShardTarget(in); + shardTarget = new SearchShardTarget(in); result = readQueryFetchSearchResult(in); result.shardTarget(shardTarget); } diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java index c6afe325bb3..dcbcce503a4 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java @@ -55,7 +55,6 @@ import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.lucene.Lucene.readExplanation; import static org.elasticsearch.common.lucene.Lucene.writeExplanation; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.highlight.HighlightField.readHighlightField; import static org.elasticsearch.search.internal.InternalSearchHitField.readSearchHitField; @@ -638,7 +637,7 @@ public class InternalSearchHit implements SearchHit { if (context.streamShardTarget() == ShardTargetType.STREAM) { if (in.readBoolean()) { - shard = readSearchShardTarget(in); + shard = new SearchShardTarget(in); } } else if (context.streamShardTarget() == ShardTargetType.LOOKUP) { int lookupId = in.readVInt(); diff --git 
a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java index 9e787cf2aa9..09d11e1a1a3 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java @@ -34,7 +34,6 @@ import java.util.IdentityHashMap; import java.util.Iterator; import java.util.Map; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.internal.InternalSearchHit.readSearchHit; /** @@ -216,7 +215,7 @@ public class InternalSearchHits implements SearchHits { // read the lookup table first int lookupSize = in.readVInt(); for (int i = 0; i < lookupSize; i++) { - context.handleShardLookup().put(in.readVInt(), readSearchShardTarget(in)); + context.handleShardLookup().put(in.readVInt(), new SearchShardTarget(in)); } } @@ -262,4 +261,4 @@ public class InternalSearchHits implements SearchHits { } } } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 0f46461f4a2..56ad8ed9467 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -58,8 +58,7 @@ import static org.elasticsearch.search.Scroll.readScroll; public class ShardSearchLocalRequest implements ShardSearchRequest { - private String index; - private int shardId; + private ShardId shardId; private int numberOfShards; private SearchType searchType; private Scroll scroll; @@ -97,8 +96,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types, Boolean requestCache) { - this.index = shardId.getIndexName(); - this.shardId = shardId.id(); + this.shardId = shardId; this.numberOfShards = numberOfShards; this.searchType = searchType; this.source = source; @@ -106,13 +104,9 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { this.requestCache = requestCache; } - @Override - public String index() { - return index; - } @Override - public int shardId() { + public ShardId shardId() { return shardId; } @@ -177,8 +171,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { @SuppressWarnings("unchecked") protected void innerReadFrom(StreamInput in) throws IOException { - index = in.readString(); - shardId = in.readVInt(); + shardId = ShardId.readShardId(in); searchType = SearchType.fromId(in.readByte()); numberOfShards = in.readVInt(); if (in.readBoolean()) { @@ -195,8 +188,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { } protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException { - out.writeString(index); - out.writeVInt(shardId); + shardId.writeTo(out); out.writeByte(searchType.id()); if (!asKey) { out.writeVInt(numberOfShards); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 1f0b3d1f188..82ff69078aa 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ 
-21,6 +21,7 @@ package org.elasticsearch.search.internal; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Template; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -34,9 +35,7 @@ import java.io.IOException; */ public interface ShardSearchRequest { - String index(); - - int shardId(); + ShardId shardId(); String[] types(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 48ea31c170a..dc19f84c7a7 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Template; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -71,13 +72,9 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha return originalIndices.indicesOptions(); } - @Override - public String index() { - return shardSearchLocalRequest.index(); - } @Override - public int shardId() { + public ShardId shardId() { return shardSearchLocalRequest.shardId(); } diff --git a/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java index ebb7615da44..bcdd94adf89 100644 --- a/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java @@ -26,7 +26,6 @@ import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult; /** @@ -56,7 +55,7 @@ public class ScrollQuerySearchResult extends TransportResponse { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardTarget = readSearchShardTarget(in); + shardTarget = new SearchShardTarget(in); queryResult = readQuerySearchResult(in); queryResult.shardTarget(shardTarget); } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java index a8a4e9ec26b..a6cf877a1f8 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext.SuggestionContext; @@ -45,12 +46,12 @@ public 
final class SuggestParseElement implements SearchParseElement { @Override public void parse(XContentParser parser, SearchContext context) throws Exception { SuggestionSearchContext suggestionSearchContext = parseInternal(parser, context.mapperService(), context.fieldData(), - context.shardTarget().index(), context.shardTarget().shardId()); + context.shardTarget().shardId()); context.suggest(suggestionSearchContext); } - public SuggestionSearchContext parseInternal(XContentParser parser, MapperService mapperService, IndexFieldDataService fieldDataService, - String index, int shardId) throws IOException { + public SuggestionSearchContext parseInternal(XContentParser parser, MapperService mapperService, + IndexFieldDataService fieldDataService, ShardId shardId) throws IOException { SuggestionSearchContext suggestionSearchContext = new SuggestionSearchContext(); BytesRef globalText = null; @@ -119,7 +120,6 @@ public final class SuggestParseElement implements SearchParseElement { SuggestionContext suggestionContext = entry.getValue(); suggestionContext.setShard(shardId); - suggestionContext.setIndex(index); SuggestUtils.verifySuggestion(mapperService, globalText, suggestionContext); suggestionSearchContext.addSuggestion(suggestionName, suggestionContext); } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java index 1d3339e0578..48e4fb5dc0b 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.shard.ShardId; import java.util.LinkedHashMap; import java.util.Map; @@ -36,9 +37,9 @@ public class SuggestionSearchContext { public Map suggestions() { return suggestions; } - + public static class SuggestionContext { - + private BytesRef text; private BytesRef prefix; private BytesRef regex; @@ -47,9 +48,8 @@ public class SuggestionSearchContext { private Analyzer analyzer; private int size = 5; private int shardSize = -1; - private int shardId; - private String index; - + private ShardId shardId; + public BytesRef getText() { return text; } @@ -119,20 +119,12 @@ public class SuggestionSearchContext { } this.shardSize = shardSize; } - - public void setShard(int shardId) { + + public void setShard(ShardId shardId) { this.shardId = shardId; } - public void setIndex(String index) { - this.index = index; - } - - public String getIndex() { - return index; - } - - public int getShard() { + public ShardId getShard() { return shardId; } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java index 7838eacd960..74e7f90600a 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java @@ -117,7 +117,7 @@ public final class PhraseSuggester extends Suggester { vars.put(SUGGESTION_TEMPLATE_VAR_NAME, spare.toString()); final ExecutableScript executable = scriptService.executable(collateScript, vars); final BytesReference querySource = (BytesReference) executable.run(); - IndexService indexService = indicesService.indexService(suggestion.getIndex()); + IndexService 
indexService = indicesService.indexService(suggestion.getShard().getIndex()); final ParsedQuery parsedQuery = indexService.newQueryShardContext().parse(querySource); collateMatch = Lucene.exists(searcher, parsedQuery.query()); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java index 2e39c39cfd2..c31993ebb81 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; @@ -157,6 +158,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "5") .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) )); + indexRandomData(index); ensureGreen(index); @@ -165,9 +167,10 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase { logger.info("--> corrupt random shard copies"); Map> corruptedShardIDMap = new HashMap<>(); + Index idx = resolveIndex(index); for (String node : internalCluster().nodesInclude(index)) { IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); - IndexService indexShards = indexServices.indexServiceSafe(index); + IndexService indexShards = indexServices.indexServiceSafe(idx); for (Integer shardId : indexShards.shardIds()) { IndexShard shard = indexShards.getShard(shardId); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index cf7b6745c8e..462a44e08b4 100644 --- a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -113,7 +113,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { @Override protected ShardIterator shards(ClusterState clusterState, Request request) { - return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId).primaryShardIt(); + return clusterState.routingTable().index(request.concreteIndex()).shard(request.shardId.getId()).primaryShardIt(); } } @@ -178,7 +178,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testBasicRequestWorks() throws InterruptedException, ExecutionException, TimeoutException { Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); action.new AsyncSingleAction(request, listener).start(); @@ -189,7 +189,7 @@ public class 
TransportInstanceSingleOperationActionTests extends ESTestCase { public void testFailureWithoutRetry() throws Exception { Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); @@ -215,7 +215,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testSuccessAfterRetryWithClusterStateUpdate() throws Exception { Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); boolean local = randomBoolean(); clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.INITIALIZING)); @@ -231,7 +231,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testSuccessAfterRetryWithExceptionFromTransport() throws Exception { Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); boolean local = randomBoolean(); clusterService.setState(ClusterStateCreationUtils.state("test", local, ShardRoutingState.STARTED)); @@ -250,7 +250,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { public void testRetryOfAnAlreadyTimedOutRequest() throws Exception { Request request = new Request().index("test").timeout(new TimeValue(0, TimeUnit.MILLISECONDS)); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); action.new AsyncSingleAction(request, listener).start(); @@ -299,7 +299,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { } }; Request request = new Request().index("test"); - request.shardId = 0; + request.shardId = new ShardId("test", "_na_", 0); PlainActionFuture listener = new PlainActionFuture<>(); clusterService.setState(ClusterStateCreationUtils.state("test", randomBoolean(), ShardRoutingState.STARTED)); action.new AsyncSingleAction(request, listener).start(); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java index cefd3a6703a..fc43f4154d1 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -37,6 +38,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; @@ -220,7 +222,7 @@ public class ClusterChangedEventTests extends ESTestCase { final ClusterState newState = nextState(previousState, changeClusterUUID, addedIndices, delIndices, 0); final ClusterChangedEvent event = new ClusterChangedEvent("_na_", 
newState, previousState); final List addsFromEvent = event.indicesCreated(); - final List delsFromEvent = event.indicesDeleted(); + final List delsFromEvent = event.indicesDeleted().stream().map((s) -> s.getName()).collect(Collectors.toList()); Collections.sort(addsFromEvent); Collections.sort(delsFromEvent); assertThat(addsFromEvent, equalTo(addedIndices)); diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java index a7d127a60c8..aa3da8fc840 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java +++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java @@ -156,10 +156,11 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); refresh(); - + Index index = resolveIndex("foo-copy"); for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) { - if (service.hasIndex("foo-copy")) { - IndexShard shard = service.indexServiceSafe("foo-copy").getShardOrNull(0); + + if (service.hasIndex(index)) { + IndexShard shard = service.indexServiceSafe(index).getShardOrNull(0); if (shard.routingEntry().primary()) { assertFalse(shard instanceof ShadowIndexShard); } else { @@ -201,8 +202,9 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(IDX).clear().setTranslog(true).get(); assertEquals(2, indicesStatsResponse.getIndex(IDX).getPrimaries().getTranslog().estimatedNumberOfOperations()); assertEquals(2, indicesStatsResponse.getIndex(IDX).getTotal().getTranslog().estimatedNumberOfOperations()); + Index index = resolveIndex(IDX); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { - IndexService indexService = service.indexService(IDX); + IndexService indexService = service.indexService(index); if (indexService != null) { IndexShard shard = indexService.getShard(0); TranslogStats translogStats = shard.translogStats(); diff --git a/core/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java b/core/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java index 1758d95a554..ec405bd8407 100644 --- a/core/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java +++ b/core/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java @@ -68,7 +68,7 @@ public class CustomQueryParserIT extends ESIntegTestCase { private static QueryShardContext queryShardContext() { IndicesService indicesService = internalCluster().getDataNodeInstance(IndicesService.class); - return indicesService.indexServiceSafe("index").newQueryShardContext(); + return indicesService.indexServiceSafe(resolveIndex("index")).newQueryShardContext(); } //see #11120 diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index e70ca9ec6de..1acf4e3fa1b 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -193,7 +193,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); - IndexService 
test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(getShardStateMetadata(shard), shardStateMetaData); @@ -226,7 +226,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); // fail shard shard.failShard("test shard fail", new CorruptIndexException("", "")); @@ -281,7 +281,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe("test"); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test")); IndexShard indexShard = indexService.getShardOrNull(0); client().admin().indices().prepareDelete("test").get(); assertThat(indexShard.getActiveOperationsCount(), equalTo(0)); @@ -303,7 +303,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexServiceSafe("test"); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test")); IndexShard indexShard = indexService.getShardOrNull(0); assertEquals(0, indexShard.getActiveOperationsCount()); Releasable operation1 = indexShard.acquirePrimaryOperationLock(); @@ -320,11 +320,11 @@ public class IndexShardTests extends ESSingleNodeTestCase { client().prepareIndex("test", "test").setSource("{}").get(); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - indicesService.indexService("test").getShardOrNull(0).checkIdle(0); + indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0); assertBusy(() -> { IndexStats indexStats = client().admin().indices().prepareStats("test").clear().get().getIndex("test"); assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - indicesService.indexService("test").getShardOrNull(0).checkIdle(0); + indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0); } ); IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); @@ -345,7 +345,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ensureGreen(); client().prepareIndex("test", "bar", "1").setSource("{}").get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); setDurability(shard, Translog.Durability.REQUEST); 
assertFalse(shard.getEngine().getTranslog().syncNeeded()); @@ -385,7 +385,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { client().prepareIndex("test", "test").setSource("{}").get(); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexShard test = indicesService.indexService("test").getShardOrNull(0); + IndexShard test = indicesService.indexService(resolveIndex("test")).getShardOrNull(0); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); client().prepareIndex("test", "test").setSource("{}").get(); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); @@ -396,7 +396,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { public void testUpdatePriority() { assertAcked(client().admin().indices().prepareCreate("test") .setSettings(IndexMetaData.SETTING_PRIORITY, 200)); - IndexService indexService = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400).build()).get(); assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); @@ -410,7 +410,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { SearchResponse response = client().prepareSearch("test").get(); assertHitCount(response, 1L); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); ShardPath shardPath = shard.shardPath(); Path dataPath = shardPath.getDataPath(); @@ -530,7 +530,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), shard, new CommonStatsFlags()), shard.commitStats()); assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath()); @@ -570,7 +570,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ensureGreen(); client().prepareIndex("test_iol", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test_iol"); + IndexService test = indicesService.indexService(resolveIndex("test_iol")); IndexShard shard = test.getShardOrNull(0); AtomicInteger preIndex = new AtomicInteger(); AtomicInteger postIndex = new AtomicInteger(); @@ -669,7 +669,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test", settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST).build()); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + 
IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); assertFalse(shard.shouldFlush()); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); @@ -703,7 +703,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); assertFalse(shard.shouldFlush()); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(133/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); @@ -749,7 +749,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ).get()); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); CountDownLatch latch = new CountDownLatch(1); Thread recoveryThread = new Thread(() -> { @@ -779,7 +779,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ).get()); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); final int numThreads = randomIntBetween(2, 4); Thread[] indexThreads = new Thread[numThreads]; @@ -830,7 +830,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); int translogOps = 1; client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); @@ -861,7 +861,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); if (randomBoolean()) { @@ -892,7 +892,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); @@ -945,7 +945,7 @@ public class IndexShardTests 
extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); ShardRouting origRouting = shard.routingEntry(); assertThat(shard.state(), equalTo(IndexShardState.STARTED)); @@ -967,8 +967,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test_target"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); - IndexService test_target = indicesService.indexService("test_target"); + IndexService test = indicesService.indexService(resolveIndex("test")); + IndexService test_target = indicesService.indexService(resolveIndex("test_target")); final IndexShard test_shard = test.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); @@ -1029,7 +1029,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService("test"); + IndexService indexService = indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefresh(true).get(); @@ -1078,7 +1078,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService("test"); + IndexService indexService = indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefresh(true).get(); @@ -1126,7 +1126,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService("test"); + IndexService indexService = indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(true).get(); IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { @@ -1179,7 +1179,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { .endObject().endObject().endObject()).get(); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("testindexfortranslogsync"); + IndexService test = indicesService.indexService(resolveIndex("testindexfortranslogsync")); IndexShard shard = test.getShardOrNull(0); ShardRouting routing = new ShardRouting(shard.routingEntry()); test.removeShard(0, "b/c britta says so"); @@ -1206,7 +1206,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { .endObject().endObject().endObject()).get(); ensureGreen(); IndicesService indicesService = 
getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("index"); + IndexService test = indicesService.indexService(resolveIndex("index")); IndexShard shard = test.getShardOrNull(0); ShardRouting routing = new ShardRouting(shard.routingEntry()); test.removeShard(0, "b/c britta says so"); @@ -1235,7 +1235,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { .endObject().endObject().endObject()).get(); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("index"); + IndexService test = indicesService.indexService(resolveIndex("index")); IndexShard shard = test.getShardOrNull(0); ShardRouting routing = new ShardRouting(shard.routingEntry()); test.removeShard(0, "b/c britta says so"); diff --git a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index afb9673508a..4f08c497443 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -161,7 +161,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { public void testShardAdditionAndRemoval() { createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 0).build()); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); MockController controller = new MockController(Settings.builder() .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "4mb").build()); @@ -194,7 +194,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put(SETTING_NUMBER_OF_REPLICAS, 0).build()); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); MockController controller = new MockController(Settings.builder() .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "5mb") @@ -248,7 +248,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { public void testThrottling() throws Exception { createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 0).build()); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService test = indicesService.indexService("test"); + IndexService test = indicesService.indexService(resolveIndex("test")); MockController controller = new MockController(Settings.builder() .put(IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, "4mb").build()); @@ -316,7 +316,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - IndexService indexService = indicesService.indexService("index"); + IndexService indexService = indicesService.indexService(resolveIndex("index")); IndexShard shard = indexService.getShardOrNull(0); assertNotNull(shard); @@ -342,7 +342,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { @Override protected long getIndexBufferRAMBytesUsed(IndexShard shard) { return 
shard.getIndexBufferRAMBytesUsed(); - } + } @Override protected void writeIndexingBufferAsync(IndexShard shard) { diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java index e34e1d6bd6b..367f4cd46ce 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -49,8 +49,9 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas assertAcked(client().admin().indices().prepareCreate("test") .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)); ensureGreen(); - IndexMetaData metaData = indicesService.indexService("test").getMetaData(); - ShardRouting shardRouting = indicesService.indexService("test").getShard(0).routingEntry(); + Index idx = resolveIndex("test"); + IndexMetaData metaData = indicesService.indexService(idx).getMetaData(); + ShardRouting shardRouting = indicesService.indexService(idx).getShard(0).routingEntry(); final AtomicInteger counter = new AtomicInteger(1); IndexEventListener countingListener = new IndexEventListener() { @Override @@ -89,10 +90,11 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas counter.incrementAndGet(); } }; - indicesService.deleteIndex("test", "simon says"); + indicesService.deleteIndex(idx, "simon says"); try { NodeServicesProvider nodeServicesProvider = getInstanceFromNode(NodeServicesProvider.class); IndexService index = indicesService.createIndex(nodeServicesProvider, metaData, Arrays.asList(countingListener)); + idx = index.index(); ShardRouting newRouting = new ShardRouting(shardRouting); String nodeId = newRouting.currentNodeId(); ShardRoutingHelper.moveToUnassigned(newRouting, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "boom")); @@ -106,7 +108,7 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas ShardRoutingHelper.moveToStarted(newRouting); shard.updateRoutingEntry(newRouting, true); } finally { - indicesService.deleteIndex("test", "simon says"); + indicesService.deleteIndex(idx, "simon says"); } assertEquals(7, counter.get()); } diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index e9f1f6be518..57a7f34e4b7 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -73,12 +73,14 @@ public class IndicesServiceTests extends ESSingleNodeTestCase { IndexMetaData meta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas( 1).build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", meta.getSettings()); - assertFalse("no shard location", indicesService.canDeleteShardContent(new ShardId("test", "_na_", 0), indexSettings)); + ShardId shardId = new ShardId(meta.getIndex(), 0); + assertFalse("no shard location", indicesService.canDeleteShardContent(shardId, indexSettings)); IndexService test = createIndex("test"); + shardId = new ShardId(test.index(), 0); assertTrue(test.hasShard(0)); - assertFalse("shard is allocated", indicesService.canDeleteShardContent(new ShardId("test", "_na_", 0), indexSettings)); + assertFalse("shard is allocated", 
indicesService.canDeleteShardContent(shardId, test.getIndexSettings())); test.removeShard(0, "boom"); - assertTrue("shard is removed", indicesService.canDeleteShardContent(new ShardId("test", "_na_", 0), indexSettings)); + assertTrue("shard is removed", indicesService.canDeleteShardContent(shardId, test.getIndexSettings())); } public void testDeleteIndexStore() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java index 239cb7a9096..936e8ac600a 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java @@ -42,7 +42,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testModificationPreventsFlushing() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -86,7 +86,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testSingleShardSuccess() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -106,7 +106,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testSyncFailsIfOperationIsInFlight() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -126,7 +126,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException { createIndex("test"); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -159,7 +159,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testFailAfterIntermediateCommit() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); @@ -192,7 +192,7 @@ 
public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testFailWhenCommitIsMissing() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); + IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 155032f1d8c..98d4f84c6ef 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.Index; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesService; @@ -261,14 +262,16 @@ public class IndexRecoveryIT extends ESIntegTestCase { .execute().actionGet().getState(); logger.info("--> waiting for recovery to start both on source and target"); + final Index index = resolveIndex(INDEX_NAME); assertBusy(new Runnable() { @Override public void run() { + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA); - assertThat(indicesService.indexServiceSafe(INDEX_NAME).getShard(0).recoveryStats().currentAsSource(), + assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsSource(), equalTo(1)); indicesService = internalCluster().getInstance(IndicesService.class, nodeB); - assertThat(indicesService.indexServiceSafe(INDEX_NAME).getShard(0).recoveryStats().currentAsTarget(), + assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsTarget(), equalTo(1)); } }); diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index 78d5e2203f5..d85849570cf 100644 --- a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -65,7 +65,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { IndexMetaData indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test"); assertEquals(indexMetaData.getSettings().get("index.refresh_interval"), "-1"); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { - IndexService indexService = service.indexService("test"); + IndexService indexService = service.indexService(resolveIndex("test")); if (indexService != null) { assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), -1); assertEquals(indexService.getIndexSettings().getFlushThresholdSize().bytes(), 1024); @@ -79,7 +79,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test"); 
assertNull(indexMetaData.getSettings().get("index.refresh_interval")); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { - IndexService indexService = service.indexService("test"); + IndexService indexService = service.indexService(resolveIndex("test")); if (indexService != null) { assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), 1000); assertEquals(indexService.getIndexSettings().getFlushThresholdSize().bytes(), 1024); diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 8a9fa191854..35624085c94 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -47,6 +47,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; @@ -376,12 +377,13 @@ public class RareClusterStateIT extends ESIntegTestCase { putMappingResponse.set(e); } }); + final Index index = resolveIndex("index"); // Wait for mappings to be available on master assertBusy(new Runnable() { @Override public void run() { final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master); - final IndexService indexService = indicesService.indexServiceSafe("index"); + final IndexService indexService = indicesService.indexServiceSafe(index); assertNotNull(indexService); final MapperService mapperService = indexService.mapperService(); DocumentMapper mapper = mapperService.documentMapper("type"); diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index 948c005bf33..b1f94f203e4 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -336,10 +336,11 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // we have to do this in two steps as we now do async shard fetching before assigning, so the change to the // allocation filtering may not have immediate effect // TODO: we should add an easier to do this. It's too much of a song and dance.. 
+ Index index = resolveIndex("test"); assertBusy(new Runnable() { @Override public void run() { - assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex("test")); + assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex(index)); } }); diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java index a47217e3048..4b514763f72 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java @@ -165,7 +165,7 @@ public class RecoveriesCollectionTests extends ESSingleNodeTestCase { long startRecovery(RecoveriesCollection collection, RecoveryTargetService.RecoveryListener listener, TimeValue timeValue) { IndicesService indexServices = getInstanceFromNode(IndicesService.class); - IndexShard indexShard = indexServices.indexServiceSafe("test").getShardOrNull(0); + IndexShard indexShard = indexServices.indexServiceSafe(resolveIndex("test")).getShardOrNull(0); final DiscoveryNode sourceNode = new DiscoveryNode("id", DummyTransportAddress.INSTANCE, Version.CURRENT); return collection.startRecovery(indexShard, sourceNode, listener, timeValue); } diff --git a/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java index 0c7c069ec34..8afbdca8c2e 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java +++ b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DocumentMapper; @@ -143,6 +144,7 @@ public class ParentFieldLoadingIT extends ESIntegTestCase { .setUpdateAllTypes(true) .get(); assertAcked(putMappingResponse); + Index test = resolveIndex("test"); assertBusy(new Runnable() { @Override public void run() { @@ -152,7 +154,7 @@ public class ParentFieldLoadingIT extends ESIntegTestCase { boolean verified = false; IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName); - IndexService indexService = indicesService.indexService("test"); + IndexService indexService = indicesService.indexService(test); if (indexService != null) { MapperService mapperService = indexService.mapperService(); DocumentMapper documentMapper = mapperService.documentMapper("child"); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoShapeIntegrationTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoShapeIntegrationTests.java index 98a23b3e1fd..8a86a0a1fb4 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoShapeIntegrationTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoShapeIntegrationTests.java @@ -76,7 +76,7 @@ public class GeoShapeIntegrationTests extends ESIntegTestCase { // left orientation test IndicesService indicesService = internalCluster().getInstance(IndicesService.class, findNodeName(idxName)); - IndexService indexService = 
indicesService.indexService(idxName); + IndexService indexService = indicesService.indexService(resolveIndex(idxName)); MappedFieldType fieldType = indexService.mapperService().fullName("location"); assertThat(fieldType, instanceOf(GeoShapeFieldMapper.GeoShapeFieldType.class)); @@ -88,7 +88,7 @@ public class GeoShapeIntegrationTests extends ESIntegTestCase { // right orientation test indicesService = internalCluster().getInstance(IndicesService.class, findNodeName(idxName+"2")); - indexService = indicesService.indexService(idxName+"2"); + indexService = indicesService.indexService(resolveIndex((idxName+"2"))); fieldType = indexService.mapperService().fullName("location"); assertThat(fieldType, instanceOf(GeoShapeFieldMapper.GeoShapeFieldType.class)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index a3161f4090f..aea35a3acd0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -41,6 +41,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; @@ -95,6 +96,7 @@ import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.IndexSettings; @@ -836,7 +838,7 @@ public abstract class ESIntegTestCase extends ESTestCase { assertThat(nodes, Matchers.not(Matchers.emptyIterable())); for (String node : nodes) { IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); - IndexService indexService = indicesService.indexService(index); + IndexService indexService = indicesService.indexService(resolveIndex(index)); assertThat("index service doesn't exists on " + node, indexService, notNullValue()); DocumentMapper documentMapper = indexService.mapperService().documentMapper(type); assertThat("document mapper doesn't exists on " + node, documentMapper, notNullValue()); @@ -2041,7 +2043,7 @@ public abstract class ESIntegTestCase extends ESTestCase { * of the provided index. 
*/ protected String routingKeyForShard(String index, String type, int shard) { - return internalCluster().routingKeyForShard(index, type, shard, getRandom()); + return internalCluster().routingKeyForShard(resolveIndex(index), type, shard, getRandom()); } /** @@ -2144,4 +2146,11 @@ public abstract class ESIntegTestCase extends ESTestCase { public @interface SuppressNetworkMode { } + public static Index resolveIndex(String index) { + GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().setIndices(index).get(); + assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index)); + String uuid = getIndexResponse.getSettings().get(index).get(IndexMetaData.SETTING_INDEX_UUID); + return new Index(index, uuid); + } + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index fc713400262..6e16d60eafc 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -22,6 +22,7 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; @@ -38,6 +39,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.MockNode; @@ -255,7 +257,14 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { assertThat(health.getStatus(), lessThanOrEqualTo(ClusterHealthStatus.YELLOW)); assertThat("Cluster must be a single node cluster", health.getNumberOfDataNodes(), equalTo(1)); IndicesService instanceFromNode = getInstanceFromNode(IndicesService.class); - return instanceFromNode.indexServiceSafe(index); + return instanceFromNode.indexServiceSafe(resolveIndex(index)); + } + + public Index resolveIndex(String index) { + GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().setIndices(index).get(); + assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index)); + String uuid = getIndexResponse.getSettings().get(index).get(IndexMetaData.SETTING_INDEX_UUID); + return new Index(index, uuid); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 04548eb85c9..82c7db11d69 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -66,6 +66,7 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; import 
org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.CommitStats; @@ -1697,7 +1698,7 @@ public final class InternalTestCluster extends TestCluster { } } - synchronized String routingKeyForShard(String index, String type, int shard, Random random) { + synchronized String routingKeyForShard(Index index, String type, int shard, Random random) { assertThat(shard, greaterThanOrEqualTo(0)); assertThat(shard, greaterThanOrEqualTo(0)); for (NodeAndClient n : nodes.values()) { @@ -1710,7 +1711,7 @@ public final class InternalTestCluster extends TestCluster { OperationRouting operationRouting = getInstanceFromNode(OperationRouting.class, node); while (true) { String routing = RandomStrings.randomAsciiOfLength(random, 10); - final int targetShard = operationRouting.indexShards(clusterService.state(), index, type, null, routing).shardId().getId(); + final int targetShard = operationRouting.indexShards(clusterService.state(), index.getName(), type, null, routing).shardId().getId(); if (shard == targetShard) { return routing; } From 12a6f36a341209c2e7e3958ce6f3dbca381c6ff8 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 9 Mar 2016 15:13:23 -0500 Subject: [PATCH 21/37] Log shard after translog snapshot during recovery --- .../elasticsearch/indices/recovery/RecoverySourceHandler.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index b92e2066af2..15b9b59dd28 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -137,7 +137,7 @@ public class RecoverySourceHandler { } } - logger.trace("snapshot translog for recovery. current size is [{}]", translogView.totalOperations()); + logger.trace("{} snapshot translog for recovery. current size is [{}]", shard.shardId(), translogView.totalOperations()); try { phase2(translogView.snapshot()); } catch (Throwable e) { From 38241a5d8baf887e3098c27fc0f425fc06774afe Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 9 Mar 2016 10:08:58 -0500 Subject: [PATCH 22/37] [reindex] Implement CompositeIndicesRequest Implements CompositeIndicesRequest on UpdateByQueryRequest and ReindexRequest so that plugins can reason about the request. In both cases the implementation is imperfect but useful: rather than listing all of the requests that make up the operation, it attempts to make dummy requests that represent the requests it will later make.
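As a rough illustration of how a plugin might consume this, the sketch below walks the (possibly dummy) subrequests and checks the indices they name. The class and the index-prefix policy are made up for illustration; only CompositeIndicesRequest, subRequests() and IndicesRequest#indices() come from this change:

    import org.elasticsearch.action.CompositeIndicesRequest;
    import org.elasticsearch.action.IndicesRequest;

    // Hypothetical consumer: reject the whole operation if any subrequest,
    // dummy or real, names a restricted index.
    public class RestrictedIndexCheck {
        public boolean allowed(CompositeIndicesRequest request) {
            for (IndicesRequest sub : request.subRequests()) {
                if (sub.indices() == null) {
                    continue; // defensive: a subrequest may not name indices yet
                }
                for (String index : sub.indices()) {
                    if (index.startsWith("restricted-")) { // made-up policy
                        return false;
                    }
                }
            }
            return true;
        }
    }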
--- .../index/reindex/ReindexRequest.java | 32 +++++++++++- .../index/reindex/UpdateByQueryRequest.java | 38 ++++++++++++-- .../reindex/UpdateByQueryRequestTests.java | 49 +++++++++++++++++++ 3 files changed, 114 insertions(+), 5 deletions(-) create mode 100644 modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java index 1ac6117d02b..d51fb7e8bc1 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java @@ -19,19 +19,31 @@ package org.elasticsearch.index.reindex; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; -import java.io.IOException; +import static java.util.Collections.unmodifiableList; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.index.VersionType.INTERNAL; -public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequest> { +/** + * Request to reindex some documents from one index to another. This implements CompositeIndicesRequest but in a misleading way. Rather than + * returning all the subrequests that it will make, it tries to return a representative set of subrequests. This is best-effort for a bunch + * of reasons, not least of which is that scripts are allowed to change the destination request in drastic ways, including changing the index + * to which documents are written. + */ +public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequest> implements CompositeIndicesRequest { /** * Prototype for index requests. */ @@ -123,4 +135,20 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequestnot + * accurate since it returns a prototype {@link IndexRequest} and not the actual requests that will be issued as part of the + * execution of this request. Additionally, scripts can modify the underlying {@link IndexRequest} and change values such as the index, + * type, {@link org.elasticsearch.action.support.IndicesOptions}. In short - only use this for very coarse reasoning about the request. + * + * @return a list comprising the {@link SearchRequest} and the prototype {@link IndexRequest} + */ + @Override + public List<? extends IndicesRequest> subRequests() { + assert getSearchRequest() != null; + assert getDestination() != null; + return unmodifiableList(Arrays.asList(getSearchRequest(), getDestination())); + } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java index b2775393877..915921d6077 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java @@ -19,13 +19,23 @@ package org.elasticsearch.index.reindex; +import java.util.ArrayList; +import java.util.List; + +import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; +import static java.util.Collections.unmodifiableList; + /** - * Request to reindex a set of documents where they are without changing their - * locations or IDs. + * Request to update some documents. That means you can't change their type, id, index, or anything like that. This implements + * CompositeIndicesRequest but in a misleading way. Rather than returning all the subrequests that it will make, it tries to return a + * representative set of subrequests. This is best-effort but better than {@linkplain ReindexRequest} because scripts can't change the + * destination index and things. */ -public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest<UpdateByQueryRequest> { +public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest<UpdateByQueryRequest> implements CompositeIndicesRequest { /** * Ingest pipeline to set on index requests made by this action. */ @@ -64,4 +74,26 @@ public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequestnot + * accurate since it returns dummy {@link IndexRequest}s and not the actual requests that will be issued as part of the + * execution of this request. + * + * @return a list comprising the {@link SearchRequest} and dummy {@link IndexRequest}s + */ + @Override + public List<? extends IndicesRequest> subRequests() { + assert getSearchRequest() != null; + List<IndicesRequest> subRequests = new ArrayList<>(); + // One dummy IndexRequest per destination index. + for (String index : getSearchRequest().indices()) { + IndexRequest request = new IndexRequest(); + request.index(index); + subRequests.add(request); + } + subRequests.add(getSearchRequest()); + return unmodifiableList(subRequests); + } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java new file mode 100644 index 00000000000..f6780729143 --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.reindex; + +import java.util.List; + +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.test.ESTestCase; + +import static org.apache.lucene.util.TestUtil.randomSimpleString; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.sameInstance; + +public class UpdateByQueryRequestTests extends ESTestCase { + public void testUpdateByQueryRequestImplementsCompositeIndicesRequestWithDummies() { + int numIndices = between(1, 100); + String[] indices = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indices[i] = randomSimpleString(random(), 1, 30); + } + UpdateByQueryRequest request = new UpdateByQueryRequest(new SearchRequest(indices)); + List<? extends IndicesRequest> subRequests = request.subRequests(); + assertThat(subRequests, hasSize(numIndices + 1)); + for (int i = 0; i < numIndices; i++) { + assertThat(subRequests.get(i).indices(), arrayWithSize(1)); + assertEquals(indices[i], subRequests.get(i).indices()[0]); + } + assertThat(subRequests.get(numIndices), sameInstance(request.getSearchRequest())); + } +} From 61f39e6c92ddbfb7dcf117f790e92abed93c91a4 Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Thu, 19 Nov 2015 11:14:12 -0600 Subject: [PATCH 23/37] GeoPointV2: update docs and query builders This commit updates the documentation for GeoPointField by removing all references to the coerce and doc_values parameters. DocValues are enabled in Lucene's GeoPointField by default (required for boundary filtering). The QueryBuilders are updated to automatically normalize points (ignoring the coerce parameter) for any index created onOrAfter version 2.2.
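In sketch form, the version gate in the query builders amounts to something like the helper below; the class and method names are illustrative, while Version#onOrAfter and GeoUtils.normalizePoint are existing APIs:

    import org.elasticsearch.Version;
    import org.elasticsearch.common.geo.GeoPoint;
    import org.elasticsearch.common.geo.GeoUtils;

    // Illustrative helper: normalize corners only when the index was created
    // on or after 2.2; older indices keep the coerce-driven behavior.
    public final class GeoPointNormalization {
        private GeoPointNormalization() {}

        public static void normalizeIfNeeded(Version indexVersionCreated, GeoPoint topLeft, GeoPoint bottomRight) {
            if (indexVersionCreated.onOrAfter(Version.V_2_2_0)) {
                GeoUtils.normalizePoint(topLeft);
                GeoUtils.normalizePoint(bottomRight);
            }
        }
    }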
--- .../index/query/GeoBoundingBoxQueryBuilder.java | 4 ++-- .../index/query/GeoDistanceQueryBuilder.java | 4 ++-- .../index/query/GeoDistanceRangeQueryBuilder.java | 2 +- .../index/query/GeoPolygonQueryBuilder.java | 4 ++-- docs/reference/mapping/types/geo-point.asciidoc | 11 ----------- .../query-dsl/geo-bounding-box-query.asciidoc | 3 --- docs/reference/query-dsl/geo-distance-query.asciidoc | 5 ----- docs/reference/query-dsl/geo-polygon-query.asciidoc | 3 --- 8 files changed, 7 insertions(+), 29 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 05c2a74bb9f..2c906dc7cb1 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -250,7 +250,8 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder use prefix encoded postings format diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java index 784c924efcf..b11b57df175 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java @@ -219,18 +219,18 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder>:: - - Normalize longitude and latitude values to a standard -180:180 / -90:90 - coordinate system. Accepts `true` and `false` (default). - -<>:: - - Should the field be stored on disk in a column-stride fashion, so that it - can later be used for sorting, aggregations, or scripting? Accepts `true` - (default) or `false`. - <>:: Should the geo-point also be indexed as a geohash in the `.geohash` diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index c52bcb93e7d..90ae7367197 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -52,9 +52,6 @@ Then the following simple query can be executed with a |Option |Description |`_name` |Optional name field to identify the filter -|`coerce` |Set to `true` to normalize longitude and latitude values to a -standard -180:180 / -90:90 coordinate system. (default is `false`). - |`ignore_malformed` |Set to `true` to accept geo points with invalid latitude or longitude (default is `false`). diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index c5b6029dc2f..7ea380bdad2 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -162,11 +162,6 @@ The following are options allowed on the filter: Optional name field to identify the query -`coerce`:: - - Set to `true` to normalize longitude and latitude values to a standard -180:180 / -90:90 - coordinate system. (default is `false`). - `ignore_malformed`:: Set to `true` to accept geo points with invalid latitude or diff --git a/docs/reference/query-dsl/geo-polygon-query.asciidoc b/docs/reference/query-dsl/geo-polygon-query.asciidoc index 306b2dd2d84..269aeed09ca 100644 --- a/docs/reference/query-dsl/geo-polygon-query.asciidoc +++ b/docs/reference/query-dsl/geo-polygon-query.asciidoc @@ -34,9 +34,6 @@ points. 
Here is an example: |Option |Description |`_name` |Optional name field to identify the filter -|`coerce` |Set to `true` to normalize longitude and latitude values to a standard -180:180 / -90:90 coordinate system. (default is `false`). - |`ignore_malformed` |Set to `true` to accept geo points with invalid latitude or longitude (default is `false`). |======================================================================= From 55635d5de126e27d68f83e28ac43fba95a172324 Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Thu, 3 Dec 2015 09:53:19 -0600 Subject: [PATCH 24/37] update coerce and breaking changes documentation --- docs/reference/mapping/params/coerce.asciidoc | 1 - docs/reference/migration/migrate_2_2.asciidoc | 10 ++++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/reference/mapping/params/coerce.asciidoc b/docs/reference/mapping/params/coerce.asciidoc index c9491607a6b..0121c307230 100644 --- a/docs/reference/mapping/params/coerce.asciidoc +++ b/docs/reference/mapping/params/coerce.asciidoc @@ -12,7 +12,6 @@ For instance: * Strings will be coerced to numbers. * Floating points will be truncated for integer values. -* Lon/lat geo-points will be normalized to a standard -180:180 / -90:90 coordinate system. For instance: diff --git a/docs/reference/migration/migrate_2_2.asciidoc b/docs/reference/migration/migrate_2_2.asciidoc index 39c059e7f47..9611d86a2ac 100644 --- a/docs/reference/migration/migrate_2_2.asciidoc +++ b/docs/reference/migration/migrate_2_2.asciidoc @@ -4,6 +4,16 @@ This section discusses the changes that you need to be aware of when migrating your application to Elasticsearch 2.2. +[float] +=== Mapping APIs + +==== Geo Point Type + +The `geo_point` format has been changed to reduce index size and the time required to both index and query +geo point data. To make these performance improvements possible both `doc_values` and `coerce` are required +and therefore cannot be changed. For this reason the `doc_values` and `coerce` parameters have been removed +from the <<geo-point,geo_point>> field mapping. + [float] === Scripting and security From b4db26eaf9a07948e4da8d1197aadfb414699562 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 8 Mar 2016 18:37:20 +0100 Subject: [PATCH 25/37] Sort: Move up `order` field to SortBuilder Currently all SortBuilder implementations have their separate order field. This PR moves it up to SortBuilder, together with a setter and getter, and makes sure the default is set to SortOrder.ASC, except for `_score` sorting where the default is SortOrder.DESC. --- .../search/sort/FieldSortBuilder.java | 17 +--- .../search/sort/GeoDistanceSortBuilder.java | 77 +++++++------------ .../search/sort/ScoreSortBuilder.java | 27 ++----- .../search/sort/ScriptSortBuilder.java | 19 +---- .../search/sort/SortBuilder.java | 24 +++++- .../builder/SearchSourceBuilderTests.java | 8 +- 6 files changed, 61 insertions(+), 111 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index 67ceb75a29c..e805e21eff5 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -27,12 +27,10 @@ import java.io.IOException; /** * A sort builder to sort based on a document field.
*/ -public class FieldSortBuilder extends SortBuilder { +public class FieldSortBuilder extends SortBuilder { private final String fieldName; - private SortOrder order; - private Object missing; private String unmappedType; @@ -55,15 +53,6 @@ public class FieldSortBuilder extends SortBuilder { this.fieldName = fieldName; } - /** - * The order of sorting. Defaults to {@link SortOrder#ASC}. - */ - @Override - public FieldSortBuilder order(SortOrder order) { - this.order = order; - return this; - } - /** * Sets the value when a field is missing in a doc. Can also be set to _last or * _first to sort missing last or first respectively. @@ -118,9 +107,7 @@ public class FieldSortBuilder extends SortBuilder { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(fieldName); - if (order != null) { - builder.field("order", order.toString()); - } + builder.field(ORDER_FIELD.getPreferredName(), order); if (missing != null) { builder.field("missing", missing); } diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 708152af1f0..b5a10e238b7 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -44,7 +44,7 @@ import java.util.Objects; /** * A geo distance based sorting on a geo point like field. */ -public class GeoDistanceSortBuilder extends SortBuilder +public class GeoDistanceSortBuilder extends SortBuilder implements ToXContent, NamedWriteable, SortElementParserTemp { public static final String NAME = "_geo_distance"; public static final boolean DEFAULT_COERCE = false; @@ -57,14 +57,13 @@ public class GeoDistanceSortBuilder extends SortBuilder private GeoDistance geoDistance = GeoDistance.DEFAULT; private DistanceUnit unit = DistanceUnit.DEFAULT; - private SortOrder order = SortOrder.ASC; - + // TODO there is an enum that covers that parameter which we should be using here private String sortMode = null; @SuppressWarnings("rawtypes") private QueryBuilder nestedFilter; private String nestedPath; - + // TODO switch to GeoValidationMethod enum private boolean coerce = DEFAULT_COERCE; private boolean ignoreMalformed = DEFAULT_IGNORE_MALFORMED; @@ -109,7 +108,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } this.fieldName = fieldName; } - + /** * Copy constructor. * */ @@ -125,7 +124,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.coerce = original.coerce; this.ignoreMalformed = original.ignoreMalformed; } - + /** * Returns the geo point like field the distance based sort operates on. * */ @@ -153,7 +152,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.points.addAll(Arrays.asList(points)); return this; } - + /** * Returns the points to create the range distance facets from. */ @@ -163,7 +162,7 @@ public class GeoDistanceSortBuilder extends SortBuilder /** * The geohash of the geo point to create the range distance facets from. - * + * * Deprecated - please use points(GeoPoint... points) instead. */ @Deprecated @@ -173,7 +172,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } return this; } - + /** * The geo distance type used to compute the distance. 
*/ @@ -181,7 +180,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.geoDistance = geoDistance; return this; } - + /** * Returns the geo distance type used to compute the distance. */ @@ -204,20 +203,6 @@ public class GeoDistanceSortBuilder extends SortBuilder return this.unit; } - /** - * The order of sorting. Defaults to {@link SortOrder#ASC}. - */ - @Override - public GeoDistanceSortBuilder order(SortOrder order) { - this.order = order; - return this; - } - - /** Returns the order of sorting. */ - public SortOrder order() { - return this.order; - } - /** * Defines which distance to use for sorting in the case a document contains multiple geo points. * Possible values: min and max @@ -240,16 +225,16 @@ public class GeoDistanceSortBuilder extends SortBuilder * Sets the nested filter that the nested objects should match with in order to be taken into account * for sorting. */ - public GeoDistanceSortBuilder setNestedFilter(QueryBuilder nestedFilter) { + public GeoDistanceSortBuilder setNestedFilter(QueryBuilder nestedFilter) { this.nestedFilter = nestedFilter; return this; } - /** + /** * Returns the nested filter that the nested objects should match with in order to be taken into account - * for sorting. + * for sorting. **/ - public QueryBuilder getNestedFilter() { + public QueryBuilder getNestedFilter() { return this.nestedFilter; } @@ -261,7 +246,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.nestedPath = nestedPath; return this; } - + /** * Returns the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a * field inside a nested object, the nearest upper nested object is selected as nested path. @@ -285,7 +270,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } return this; } - + public boolean ignoreMalformed() { return this.ignoreMalformed; } @@ -302,11 +287,7 @@ public class GeoDistanceSortBuilder extends SortBuilder builder.field("unit", unit); builder.field("distance_type", geoDistance.name().toLowerCase(Locale.ROOT)); - if (order == SortOrder.DESC) { - builder.field("reverse", true); - } else { - builder.field("reverse", false); - } + builder.field(ORDER_FIELD.getPreferredName(), order); if (sortMode != null) { builder.field("mode", sortMode); @@ -363,7 +344,7 @@ public class GeoDistanceSortBuilder extends SortBuilder public void writeTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeGenericValue(points); - + geoDistance.writeTo(out); unit.writeTo(out); order.writeTo(out); @@ -382,10 +363,10 @@ public class GeoDistanceSortBuilder extends SortBuilder @Override public GeoDistanceSortBuilder readFrom(StreamInput in) throws IOException { String fieldName = in.readString(); - - ArrayList points = (ArrayList) in.readGenericValue(); + + ArrayList points = (ArrayList) in.readGenericValue(); GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, points.toArray(new GeoPoint[points.size()])); - + result.geoDistance(GeoDistance.readGeoDistanceFrom(in)); result.unit(DistanceUnit.readDistanceUnit(in)); result.order(SortOrder.readOrderFrom(in)); @@ -409,9 +390,9 @@ public class GeoDistanceSortBuilder extends SortBuilder List geoPoints = new ArrayList<>(); DistanceUnit unit = DistanceUnit.DEFAULT; GeoDistance geoDistance = GeoDistance.DEFAULT; - boolean reverse = false; + SortOrder order = SortOrder.ASC; MultiValueMode sortMode = null; - QueryBuilder nestedFilter = null; + QueryBuilder nestedFilter = null; String nestedPath = null; boolean coerce = 
GeoDistanceSortBuilder.DEFAULT_COERCE; @@ -429,8 +410,8 @@ public class GeoDistanceSortBuilder extends SortBuilder } else if (token == XContentParser.Token.START_OBJECT) { // the json in the format of -> field : { lat : 30, lon : 12 } if ("nested_filter".equals(currentName) || "nestedFilter".equals(currentName)) { - // TODO Note to remember: while this is kept as a QueryBuilder internally, - // we need to make sure to call toFilter() on it once on the shard + // TODO Note to remember: while this is kept as a QueryBuilder internally, + // we need to make sure to call toFilter() on it once on the shard // (e.g. in the new build() method) nestedFilter = context.parseInnerQueryBuilder(); } else { @@ -441,9 +422,9 @@ public class GeoDistanceSortBuilder extends SortBuilder } } else if (token.isValue()) { if ("reverse".equals(currentName)) { - reverse = parser.booleanValue(); + order = parser.booleanValue() ? SortOrder.DESC : SortOrder.ASC; } else if ("order".equals(currentName)) { - reverse = "desc".equals(parser.text()); + order = SortOrder.fromString(parser.text()); } else if ("unit".equals(currentName)) { unit = DistanceUnit.fromString(parser.text()); } else if ("distance_type".equals(currentName) || "distanceType".equals(currentName)) { @@ -474,11 +455,7 @@ public class GeoDistanceSortBuilder extends SortBuilder GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, geoPoints.toArray(new GeoPoint[geoPoints.size()])); result.geoDistance(geoDistance); result.unit(unit); - if (reverse) { - result.order(SortOrder.DESC); - } else { - result.order(SortOrder.ASC); - } + result.order(order); if (sortMode != null) { result.sortMode(sortMode.name()); } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java index 5d1a0d82987..6b1bc054ee7 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java @@ -35,38 +35,24 @@ import java.util.Objects; /** * A sort builder allowing to sort by score. */ -public class ScoreSortBuilder extends SortBuilder implements NamedWriteable, +public class ScoreSortBuilder extends SortBuilder implements NamedWriteable, SortElementParserTemp { private static final String NAME = "_score"; static final ScoreSortBuilder PROTOTYPE = new ScoreSortBuilder(); public static final ParseField REVERSE_FIELD = new ParseField("reverse"); public static final ParseField ORDER_FIELD = new ParseField("order"); - private SortOrder order = SortOrder.DESC; - /** - * The order of sort scoring. By default, its {@link SortOrder#DESC}. - */ - @Override - public ScoreSortBuilder order(SortOrder order) { - Objects.requireNonNull(order, "sort order cannot be null."); - this.order = order; - return this; + public ScoreSortBuilder() { + // order defaults to desc when sorting on the _score + order(SortOrder.DESC); } - /** - * Get the order of sort scoring. By default, its {@link SortOrder#DESC}. 
- */ - public SortOrder order() { - return this.order; - } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(NAME); - if (order == SortOrder.ASC) { - builder.field(REVERSE_FIELD.getPreferredName(), true); - } + builder.field(ORDER_FIELD.getPreferredName(), order); builder.endObject(); return builder; } @@ -124,7 +110,8 @@ public class ScoreSortBuilder extends SortBuilder implements NamedWriteable { private Script script; private final String type; - private SortOrder order; - private String sortMode; private QueryBuilder nestedFilter; @@ -53,15 +51,6 @@ public class ScriptSortBuilder extends SortBuilder { this.type = type; } - /** - * Sets the sort order. - */ - @Override - public ScriptSortBuilder order(SortOrder order) { - this.order = order; - return this; - } - /** * Defines which distance to use for sorting in the case a document contains multiple geo points. * Possible values: min and max @@ -75,7 +64,7 @@ public class ScriptSortBuilder extends SortBuilder { * Sets the nested filter that the nested objects should match with in order to be taken into account * for sorting. */ - public ScriptSortBuilder setNestedFilter(QueryBuilder nestedFilter) { + public ScriptSortBuilder setNestedFilter(QueryBuilder nestedFilter) { this.nestedFilter = nestedFilter; return this; } @@ -94,9 +83,7 @@ public class ScriptSortBuilder extends SortBuilder { builder.startObject("_script"); builder.field("script", script); builder.field("type", type); - if (order == SortOrder.DESC) { - builder.field("reverse", true); - } + builder.field(ORDER_FIELD.getPreferredName(), order); if (sortMode != null) { builder.field("mode", sortMode); } diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java index 0935b76ece9..7852af4e97e 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java @@ -20,14 +20,20 @@ package org.elasticsearch.search.sort; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import java.util.Objects; + /** * */ -public abstract class SortBuilder implements ToXContent { +public abstract class SortBuilder> implements ToXContent { + + protected SortOrder order = SortOrder.ASC; + public static final ParseField ORDER_FIELD = new ParseField("order"); @Override public String toString() { @@ -42,7 +48,19 @@ public abstract class SortBuilder implements ToXContent { } /** - * The order of sorting. Defaults to {@link SortOrder#ASC}. + * Set the order of sorting. */ - public abstract SortBuilder order(SortOrder order); + @SuppressWarnings("unchecked") + public T order(SortOrder order) { + Objects.requireNonNull(order, "sort order cannot be null."); + this.order = order; + return (T) this; + } + + /** + * Return the {@link SortOrder} used for this {@link SortBuilder}. 
+ */ + public SortOrder order() { + return this.order; + } } diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 6b29cabe3f6..2cb4554ed08 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -49,15 +49,9 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.EnvironmentModule; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.AbstractQueryTestCase; -import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.EmptyQueryBuilder; -import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.index.query.WrapperQueryBuilder; -import org.elasticsearch.index.query.functionscore.ScoreFunctionParser; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -554,7 +548,7 @@ public class SearchSourceBuilderTests extends ESTestCase { SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.parseSearchSource(parser, createParseContext(parser), aggParsers); assertEquals(1, searchSourceBuilder.sorts().size()); - assertEquals("{\"foo\":{}}", searchSourceBuilder.sorts().get(0).toUtf8()); + assertEquals("{\"foo\":{\"order\":\"asc\"}}", searchSourceBuilder.sorts().get(0).toUtf8()); } } From cd12241e9f76c01c98961afb4f5b874926cc4269 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Sat, 27 Feb 2016 18:48:42 +0100 Subject: [PATCH 26/37] Decouple the TransportService and ClusterService #16872 Currently, the cluster service is tightly coupled to the transport service by both managing node connections and requiring the bound address in order to create the local disco node. This commit introduces a new NodeConnectionsService which is in charge of node connection management and makes it possible to remove all network-related calls from the cluster service. The local DiscoveryNode is now created by DiscoveryNodeService and is set on both the cluster service and the transport service during node startup.
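For orientation while reading the Node.java hunk further down, here is a condensed sketch of the start-up wiring this commit establishes. This is an editorial illustration, not part of the patch: the wrapper class and method name are invented, while the types and calls are the ones introduced or touched by this change.

    import org.elasticsearch.cluster.NodeConnectionsService;
    import org.elasticsearch.cluster.node.DiscoveryNode;
    import org.elasticsearch.cluster.node.DiscoveryNodeService;
    import org.elasticsearch.cluster.service.InternalClusterService;
    import org.elasticsearch.transport.TransportService;

    // Hypothetical wrapper, for illustration only; mirrors the call order in Node#start().
    class StartupWiringSketch {
        static void wire(InternalClusterService clusterService,
                         NodeConnectionsService nodeConnectionsService,
                         DiscoveryNodeService discoveryNodeService,
                         TransportService transportService) {
            // Connection management now lives in its own service, handed to the cluster service.
            nodeConnectionsService.start();
            clusterService.setNodeConnectionsService(nodeConnectionsService);

            // The transport service must be started first so its publish address is known.
            transportService.start();
            DiscoveryNode localNode = discoveryNodeService
                    .buildLocalNode(transportService.boundAddress().publishAddress());

            // The same local node instance is handed to both services; neither builds it itself.
            clusterService.setLocalNode(localNode);
            transportService.setLocalNode(localNode);
            clusterService.add(transportService.getTaskManager());

            // Only now is the cluster service started, with the local disco node fully set up.
            clusterService.start();
        }
    }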
Closes #16788 Closes #16872 --- .../elasticsearch/cluster/ClusterModule.java | 1 + .../elasticsearch/cluster/ClusterService.java | 6 - .../cluster/NodeConnectionsService.java | 156 ++++ .../cluster/node/DiscoveryNodeService.java | 25 +- .../service/InternalClusterService.java | 175 +--- .../common/settings/ClusterSettings.java | 6 +- .../common/util/concurrent/KeyedLock.java | 52 +- .../java/org/elasticsearch/node/Node.java | 18 + .../transport/netty/NettyTransport.java | 18 +- .../node/tasks/TaskManagerTestCase.java | 4 +- .../admin/cluster/node/tasks/TasksIT.java | 13 +- .../cluster/ClusterServiceIT.java | 708 --------------- .../cluster/NodeConnectionsServiceTests.java | 275 ++++++ ...rdFailedClusterStateTaskExecutorTests.java | 4 +- .../cluster/service/ClusterServiceTests.java | 824 ++++++++++++++++++ .../elasticsearch/test/MockLogAppender.java | 2 +- .../transport/netty/KeyedLockTests.java | 33 +- .../elasticsearch/tribe/TribeUnitTests.java | 6 +- .../org/elasticsearch/test/ESTestCase.java | 32 +- .../test/InternalTestCluster.java | 8 +- .../test/cluster/NoopClusterService.java | 6 - .../test/cluster/TestClusterService.java | 14 - 22 files changed, 1423 insertions(+), 963 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java create mode 100644 core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java create mode 100644 core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 3e668191ff3..6d9273b2661 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -136,6 +136,7 @@ public class ClusterModule extends AbstractModule { bind(AllocationService.class).asEagerSingleton(); bind(DiscoveryNodeService.class).asEagerSingleton(); bind(ClusterService.class).to(InternalClusterService.class).asEagerSingleton(); + bind(NodeConnectionsService.class).asEagerSingleton(); bind(OperationRouting.class).asEagerSingleton(); bind(MetaDataCreateIndexService.class).asEagerSingleton(); bind(MetaDataDeleteIndexService.class).asEagerSingleton(); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/ClusterService.java index 27df4b9e96f..10d547afc5c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterService.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.tasks.TaskManager; import java.util.List; @@ -154,9 +153,4 @@ public interface ClusterService extends LifecycleComponent { * @return A zero time value if the queue is empty, otherwise the time value oldest task waiting in the queue */ TimeValue getMaxTaskWaitTime(); - - /** - * Returns task manager created in the cluster service - */ - TaskManager getTaskManager(); } diff --git a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java new file mode 100644 index 00000000000..cce25652ed7 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java 
@@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.common.util.concurrent.KeyedLock; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ScheduledFuture; + +/** + * This component is responsible for connecting to nodes once they are added to the cluster state, and for disconnecting + * when they are removed. It also periodically checks that all connections are still open and restores them if needed. + * Note that this component is *not* responsible for removing nodes from the cluster if they disconnect / do not respond + * to pings. This is done by {@link org.elasticsearch.discovery.zen.fd.NodesFaultDetection}. Master fault detection + * is done by {@link org.elasticsearch.discovery.zen.fd.MasterFaultDetection}. + */ +public class NodeConnectionsService extends AbstractLifecycleComponent<NodeConnectionsService> { + + public static final Setting<TimeValue> CLUSTER_NODE_RECONNECT_INTERVAL_SETTING = + Setting.positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER); + private final ThreadPool threadPool; + private final TransportService transportService; + + // map between current node and the number of failed connection attempts. 0 means successfully connected.
+ // if a node doesn't appear in this list it shouldn't be monitored + private ConcurrentMap<DiscoveryNode, Integer> nodes = ConcurrentCollections.newConcurrentMap(); + + private final KeyedLock<DiscoveryNode> nodeLocks = new KeyedLock<>(); + + private final TimeValue reconnectInterval; + + private volatile ScheduledFuture<?> backgroundFuture = null; + + @Inject + public NodeConnectionsService(Settings settings, ThreadPool threadPool, TransportService transportService) { + super(settings); + this.threadPool = threadPool; + this.transportService = transportService; + this.reconnectInterval = NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings); + } + + public void connectToAddedNodes(ClusterChangedEvent event) { + + // TODO: do this in parallel (and wait) + for (final DiscoveryNode node : event.nodesDelta().addedNodes()) { + try (Releasable ignored = nodeLocks.acquire(node)) { + Integer current = nodes.put(node, 0); + assert current == null : "node " + node + " was added in event but already in internal nodes"; + validateNodeConnected(node); + } + } + } + + public void disconnectFromRemovedNodes(ClusterChangedEvent event) { + for (final DiscoveryNode node : event.nodesDelta().removedNodes()) { + try (Releasable ignored = nodeLocks.acquire(node)) { + Integer current = nodes.remove(node); + assert current != null : "node " + node + " was removed in event but not in internal nodes"; + try { + transportService.disconnectFromNode(node); + } catch (Throwable e) { + logger.warn("failed to disconnect from node [" + node + "]", e); + } + } + } + } + + void validateNodeConnected(DiscoveryNode node) { + assert nodeLocks.isHeldByCurrentThread(node) : "validateNodeConnected must be called under lock"; + if (lifecycle.stoppedOrClosed() || + nodes.containsKey(node) == false) { // we double check existence of node since connectToNode might take time...
+ // nothing to do + } else { + try { + // connecting to an already connected node is a noop + transportService.connectToNode(node); + nodes.put(node, 0); + } catch (Exception e) { + Integer nodeFailureCount = nodes.get(node); + assert nodeFailureCount != null : node + " didn't have a counter in nodes map"; + nodeFailureCount = nodeFailureCount + 1; + // log every 6th failure + if ((nodeFailureCount % 6) == 1) { + logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount); + } + nodes.put(node, nodeFailureCount); + } + } + } + + class ConnectionChecker extends AbstractRunnable { + + @Override + public void onFailure(Throwable t) { + logger.warn("unexpected error while checking for node reconnects", t); + } + + protected void doRun() { + for (DiscoveryNode node : nodes.keySet()) { + try (Releasable ignored = nodeLocks.acquire(node)) { + validateNodeConnected(node); + } + } + } + + @Override + public void onAfter() { + if (lifecycle.started()) { + backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this); + } + } + } + + @Override + protected void doStart() { + backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ConnectionChecker()); + } + + @Override + protected void doStop() { + FutureUtils.cancel(backgroundFuture); + } + + @Override + protected void doClose() { + + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java index 83f603d2890..47c0e0052d3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java @@ -19,24 +19,40 @@ package org.elasticsearch.cluster.node; +import org.elasticsearch.Version; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.concurrent.CopyOnWriteArrayList; /** */ public class DiscoveryNodeService extends AbstractComponent { + public static final Setting NODE_ID_SEED_SETTING = + // don't use node.id.seed so it won't be seen as an attribute + Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER); private final List customAttributesProviders = new CopyOnWriteArrayList<>(); + private final Version version; @Inject - public DiscoveryNodeService(Settings settings) { + public DiscoveryNodeService(Settings settings, Version version) { super(settings); + this.version = version; + } + + public static String generateNodeId(Settings settings) { + Random random = Randomness.get(settings, NODE_ID_SEED_SETTING); + return Strings.randomBase64UUID(random); } public DiscoveryNodeService addCustomAttributeProvider(CustomAttributesProvider customAttributesProvider) { @@ -44,7 +60,7 @@ public class DiscoveryNodeService extends AbstractComponent { return this; } - public Map buildAttributes() { + public DiscoveryNode buildLocalNode(TransportAddress publishAddress) { Map attributes = new HashMap<>(settings.getByPrefix("node.").getAsMap()); attributes.remove("name"); // name is extracted in other places if 
(attributes.containsKey("client")) { @@ -76,10 +92,11 @@ public class DiscoveryNodeService extends AbstractComponent { } } - return attributes; + final String nodeId = generateNodeId(settings); + return new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, attributes, version); } - public static interface CustomAttributesProvider { + public interface CustomAttributesProvider { Map buildAttributes(); } diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index 3d70ac84e33..7cd3d840fbc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.service; -import org.elasticsearch.Version; import org.elasticsearch.cluster.AckedClusterStateTaskListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -32,19 +31,18 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.LocalNodeMasterListener; +import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.TimeoutClusterStateListener; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -54,7 +52,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.CountDown; @@ -65,9 +62,7 @@ import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.PrioritizedRunnable; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.Collection; @@ -78,8 +73,6 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Queue; -import java.util.Random; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.Executor; import java.util.concurrent.Future; @@ -97,25 +90,15 @@ import static 
org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF public class InternalClusterService extends AbstractLifecycleComponent implements ClusterService { public static final Setting CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER); public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; - public static final Setting NODE_ID_SEED_SETTING = - // don't use node.id.seed so it won't be seen as an attribute - Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER); private final ThreadPool threadPool; private BiConsumer clusterStatePublisher; private final OperationRouting operationRouting; - private final TransportService transportService; - private final ClusterSettings clusterSettings; - private final DiscoveryNodeService discoveryNodeService; - private final Version version; - - private final TimeValue reconnectInterval; private TimeValue slowTaskLoggingThreshold; @@ -140,47 +123,49 @@ public class InternalClusterService extends AbstractLifecycleComponent publisher) { + synchronized public void setClusterStatePublisher(BiConsumer publisher) { clusterStatePublisher = publisher; } + synchronized public void setLocalNode(DiscoveryNode localNode) { + assert clusterState.nodes().localNodeId() == null : "local node is already set"; + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.id()); + this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); + } + + synchronized public void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) { + assert this.nodeConnectionsService == null : "nodeConnectionsService is already set"; + this.nodeConnectionsService = nodeConnectionsService; + } + @Override - public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { + synchronized public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { if (lifecycle.started()) { throw new IllegalStateException("can't set initial block when started"); } @@ -188,12 +173,12 @@ public class InternalClusterService extends AbstractLifecycleComponent nodeAttributes = discoveryNodeService.buildAttributes(); - // note, we rely on the fact that its a new id each time we start, see FD and "kill -9" handling - final String nodeId = generateNodeId(settings); - final TransportAddress publishAddress = transportService.boundAddress().publishAddress(); - DiscoveryNode localNode = new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, nodeAttributes, version); - DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id()); - this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).blocks(initialBlocks).build(); - this.transportService.setLocalNode(localNode); + this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build(); } @Override - protected void doStop() { - FutureUtils.cancel(this.reconnectToNodes); + synchronized protected void doStop() { for (NotifyTimeout onGoingTimeout : onGoingTimeouts) { onGoingTimeout.cancel(); onGoingTimeout.listener.onClose(); @@ -230,7 +207,7 @@ public 
class InternalClusterService extends AbstractLifecycleComponent batchResult; - long startTimeNS = System.nanoTime(); + long startTimeNS = currentTimeInNanos(); try { List inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); batchResult = executor.execute(previousClusterState, inputs); } catch (Throwable e) { - TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); + TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); if (logger.isTraceEnabled()) { - StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n"); + StringBuilder sb = new StringBuilder("failed to execute cluster state update in [").append(executionTime).append("], state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n"); sb.append(previousClusterState.nodes().prettyPrint()); sb.append(previousClusterState.routingTable().prettyPrint()); sb.append(previousClusterState.getRoutingNodes().prettyPrint()); @@ -509,8 +481,8 @@ public class InternalClusterService extends AbstractLifecycleComponent slowTaskLoggingThreshold.getMillis()) { - logger.warn("cluster state update task [{}] took {} above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold); + logger.warn("cluster state update task [{}] took [{}] above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold); } } @@ -809,64 +770,6 @@ public class InternalClusterService extends AbstractLifecycleComponent failureCount = ConcurrentCollections.newConcurrentMap(); - - @Override - public void run() { - // master node will check against all nodes if its alive with certain discoveries implementations, - // but we can't rely on that, so we check on it as well - for (DiscoveryNode node : clusterState.nodes()) { - if (lifecycle.stoppedOrClosed()) { - return; - } - if (clusterState.nodes().nodeExists(node.id())) { // we double check existence of node since connectToNode might take time... - if (!transportService.nodeConnected(node)) { - try { - transportService.connectToNode(node); - } catch (Exception e) { - if (lifecycle.stoppedOrClosed()) { - return; - } - if (clusterState.nodes().nodeExists(node.id())) { // double check here as well, maybe its gone? - Integer nodeFailureCount = failureCount.get(node); - if (nodeFailureCount == null) { - nodeFailureCount = 1; - } else { - nodeFailureCount = nodeFailureCount + 1; - } - // log every 6th failure - if ((nodeFailureCount % 6) == 0) { - // reset the failure count... 
- nodeFailureCount = 0; - logger.warn("failed to reconnect to node {}", e, node); - } - failureCount.put(node, nodeFailureCount); - } - } - } - } - } - // go over and remove failed nodes that have been removed - DiscoveryNodes nodes = clusterState.nodes(); - for (Iterator failedNodesIt = failureCount.keySet().iterator(); failedNodesIt.hasNext(); ) { - DiscoveryNode failedNode = failedNodesIt.next(); - if (!nodes.nodeExists(failedNode.id())) { - failedNodesIt.remove(); - } - } - if (lifecycle.started()) { - reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this); - } - } - } - - public static String generateNodeId(Settings settings) { - Random random = Randomness.get(settings, NODE_ID_SEED_SETTING); - return Strings.randomBase64UUID(random); - } - private static class LocalNodeMasterListeners implements ClusterStateListener { private final List listeners = new CopyOnWriteArrayList<>(); diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index fa8b8c4ac41..3215f3db05a 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -29,8 +29,10 @@ import org.elasticsearch.client.transport.TransportClientNodesService; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; @@ -259,7 +261,7 @@ public final class ClusterSettings extends AbstractScopedSettings { TransportService.TRACE_LOG_INCLUDE_SETTING, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, - InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING, + NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING, HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, Transport.TRANSPORT_TCP_COMPRESS, @@ -326,7 +328,7 @@ public final class ClusterSettings extends AbstractScopedSettings { Environment.PATH_SCRIPTS_SETTING, Environment.PATH_SHARED_DATA_SETTING, Environment.PIDFILE_SETTING, - InternalClusterService.NODE_ID_SEED_SETTING, + DiscoveryNodeService.NODE_ID_SEED_SETTING, DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING, DiscoveryModule.DISCOVERY_TYPE_SETTING, DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java index 83bb9fd690d..5c30330c156 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java @@ -20,7 +20,10 @@ package org.elasticsearch.common.util.concurrent; +import org.elasticsearch.common.lease.Releasable; + import 
java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantLock; @@ -29,9 +32,8 @@ import java.util.concurrent.locks.ReentrantLock; * created the first time they are acquired and removed if no thread hold the * lock. The latter is important to assure that the list of locks does not grow * infinitely. - * - * A Thread can acquire a lock only once. - * + * + * */ public class KeyedLock<T> { @@ -50,48 +52,38 @@ public class KeyedLock<T> { private final ConcurrentMap<T, KeyLock> map = ConcurrentCollections.newConcurrentMap(); - protected final ThreadLocal<KeyLock> threadLocal = new ThreadLocal<>(); - - public void acquire(T key) { + public Releasable acquire(T key) { + assert isHeldByCurrentThread(key) == false : "lock for " + key + " is already held by this thread"; while (true) { - if (threadLocal.get() != null) { - // if we are here, the thread already has the lock - throw new IllegalStateException("Lock already acquired in Thread" + Thread.currentThread().getId() - + " for key " + key); - } KeyLock perNodeLock = map.get(key); if (perNodeLock == null) { KeyLock newLock = new KeyLock(fair); perNodeLock = map.putIfAbsent(key, newLock); if (perNodeLock == null) { newLock.lock(); - threadLocal.set(newLock); - return; + return new ReleasableLock(key, newLock); } } assert perNodeLock != null; int i = perNodeLock.count.get(); if (i > 0 && perNodeLock.count.compareAndSet(i, i + 1)) { perNodeLock.lock(); - threadLocal.set(perNodeLock); - return; + return new ReleasableLock(key, perNodeLock); } } } - public void release(T key) { - KeyLock lock = threadLocal.get(); + public boolean isHeldByCurrentThread(T key) { + KeyLock lock = map.get(key); if (lock == null) { - throw new IllegalStateException("Lock not acquired"); + return false; } - release(key, lock); + return lock.isHeldByCurrentThread(); } void release(T key, KeyLock lock) { - assert lock.isHeldByCurrentThread(); assert lock == map.get(key); lock.unlock(); - threadLocal.set(null); int decrementAndGet = lock.count.decrementAndGet(); if (decrementAndGet == 0) { map.remove(key, lock); @@ -99,6 +91,24 @@ public class KeyedLock<T> { } + private final class ReleasableLock implements Releasable { + final T key; + final KeyLock lock; + final AtomicBoolean closed = new AtomicBoolean(); + + private ReleasableLock(T key, KeyLock lock) { + this.key = key; + this.lock = lock; + } + + @Override + public void close() { + if (closed.compareAndSet(false, true)) { + release(key, lock); + } + } + } + @SuppressWarnings("serial") private final static class KeyLock extends ReentrantLock { KeyLock(boolean fair) { diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index e279d3e819f..b995723127a 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -34,8 +34,10 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.MasterNodeChangePredicate; +import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.service.InternalClusterService; import
org.elasticsearch.common.StopWatch; @@ -294,6 +296,10 @@ public class Node implements Closeable { "node cluster service implementation must inherit from InternalClusterService"; final InternalClusterService clusterService = (InternalClusterService) injector.getInstance(ClusterService.class); + final NodeConnectionsService nodeConnectionsService = injector.getInstance(NodeConnectionsService.class); + nodeConnectionsService.start(); + clusterService.setNodeConnectionsService(nodeConnectionsService); + // TODO hack around circular dependencies problems injector.getInstance(GatewayAllocator.class).setReallocation(clusterService, injector.getInstance(RoutingService.class)); @@ -311,6 +317,15 @@ public class Node implements Closeable { // Start the transport service now so the publish address will be added to the local disco node in ClusterService TransportService transportService = injector.getInstance(TransportService.class); transportService.start(); + DiscoveryNode localNode = injector.getInstance(DiscoveryNodeService.class) + .buildLocalNode(transportService.boundAddress().publishAddress()); + + // TODO: need to find a cleaner way to start/construct a service with some initial parameters, + // playing nice with the life cycle interfaces + clusterService.setLocalNode(localNode); + transportService.setLocalNode(localNode); + clusterService.add(transportService.getTaskManager()); + clusterService.start(); // start after cluster service so the local disco is known @@ -392,6 +407,7 @@ public class Node implements Closeable { injector.getInstance(RoutingService.class).stop(); injector.getInstance(ClusterService.class).stop(); injector.getInstance(Discovery.class).stop(); + injector.getInstance(NodeConnectionsService.class).stop(); injector.getInstance(MonitorService.class).stop(); injector.getInstance(GatewayService.class).stop(); injector.getInstance(SearchService.class).stop(); @@ -449,6 +465,8 @@ public class Node implements Closeable { toClose.add(injector.getInstance(RoutingService.class)); toClose.add(() -> stopWatch.stop().start("cluster")); toClose.add(injector.getInstance(ClusterService.class)); + toClose.add(() -> stopWatch.stop().start("node_connections_service")); + toClose.add(injector.getInstance(NodeConnectionsService.class)); toClose.add(() -> stopWatch.stop().start("discovery")); toClose.add(injector.getInstance(Discovery.class)); toClose.add(() -> stopWatch.stop().start("monitor")); diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index dc9dd70ab8d..27ba643ef71 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.math.MathUtils; import org.elasticsearch.common.metrics.CounterMetric; @@ -943,8 +944,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem } globalLock.readLock().lock(); try { - connectionLock.acquire(node.id()); - try { + + try (Releasable ignored = connectionLock.acquire(node.id())) { if (!lifecycle.started()) { throw new 
IllegalStateException("can't add nodes to a stopped transport"); } @@ -979,8 +980,6 @@ public class NettyTransport extends AbstractLifecycleComponent implem } catch (Exception e) { throw new ConnectTransportException(node, "general node connection failure", e); } - } finally { - connectionLock.release(node.id()); } } finally { globalLock.readLock().unlock(); @@ -1103,8 +1102,8 @@ public class NettyTransport extends AbstractLifecycleComponent implem @Override public void disconnectFromNode(DiscoveryNode node) { - connectionLock.acquire(node.id()); - try { + + try (Releasable ignored = connectionLock.acquire(node.id())) { NodeChannels nodeChannels = connectedNodes.remove(node); if (nodeChannels != null) { try { @@ -1115,8 +1114,6 @@ public class NettyTransport extends AbstractLifecycleComponent implem transportServiceAdapter.raiseNodeDisconnected(node); } } - } finally { - connectionLock.release(node.id()); } } @@ -1128,8 +1125,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem // check outside of the lock NodeChannels nodeChannels = connectedNodes.get(node); if (nodeChannels != null && nodeChannels.hasChannel(channel)) { - connectionLock.acquire(node.id()); - try { + try (Releasable ignored = connectionLock.acquire(node.id())) { nodeChannels = connectedNodes.get(node); // check again within the connection lock, if its still applicable to remove it if (nodeChannels != null && nodeChannels.hasChannel(channel)) { @@ -1143,8 +1139,6 @@ public class NettyTransport extends AbstractLifecycleComponent implem } return true; } - } finally { - connectionLock.release(node.id()); } } return false; diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 4dcf54b5d0b..f5d8637571a 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -194,7 +194,7 @@ public abstract class TaskManagerTestCase extends ESTestCase { } }; transportService.start(); - clusterService = new TestClusterService(threadPool, transportService); + clusterService = new TestClusterService(threadPool); clusterService.add(transportService.getTaskManager()); discoveryNode = new DiscoveryNode(name, transportService.boundAddress().publishAddress(), Version.CURRENT); IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings); @@ -238,7 +238,7 @@ public abstract class TaskManagerTestCase extends ESTestCase { RecordingTaskManagerListener[] listeners = new RecordingTaskManagerListener[nodes.length]; for (int i = 0; i < nodes.length; i++) { listeners[i] = new RecordingTaskManagerListener(nodes[i].discoveryNode, actionMasks); - ((MockTaskManager) (nodes[i].clusterService.getTaskManager())).addListener(listeners[i]); + ((MockTaskManager) (nodes[i].transportService.getTaskManager())).addListener(listeners[i]); } return listeners; } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 8c791a99018..7c2747a1a28 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -43,6 +43,7 @@ import org.elasticsearch.test.tasks.MockTaskManager; 
import org.elasticsearch.test.tasks.MockTaskManagerListener; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.ReceiveTimeoutTransportException; +import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; @@ -263,8 +264,8 @@ public class TasksIT extends ESIntegTestCase { ReentrantLock taskFinishLock = new ReentrantLock(); taskFinishLock.lock(); CountDownLatch taskRegistered = new CountDownLatch(1); - for (ClusterService clusterService : internalCluster().getInstances(ClusterService.class)) { - ((MockTaskManager)clusterService.getTaskManager()).addListener(new MockTaskManagerListener() { + for (TransportService transportService : internalCluster().getInstances(TransportService.class)) { + ((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() { @Override public void onTaskRegistered(Task task) { if (task.getAction().startsWith(IndexAction.NAME)) { @@ -408,7 +409,7 @@ public class TasksIT extends ESIntegTestCase { @Override public void tearDown() throws Exception { for (Map.Entry, RecordingTaskManagerListener> entry : listeners.entrySet()) { - ((MockTaskManager)internalCluster().getInstance(ClusterService.class, entry.getKey().v1()).getTaskManager()).removeListener(entry.getValue()); + ((MockTaskManager) internalCluster().getInstance(TransportService.class, entry.getKey().v1()).getTaskManager()).removeListener(entry.getValue()); } listeners.clear(); super.tearDown(); @@ -418,10 +419,10 @@ public class TasksIT extends ESIntegTestCase { * Registers recording task event listeners with the given action mask on all nodes */ private void registerTaskManageListeners(String actionMasks) { - for (ClusterService clusterService : internalCluster().getInstances(ClusterService.class)) { - DiscoveryNode node = clusterService.localNode(); + for (String nodeName : internalCluster().getNodeNames()) { + DiscoveryNode node = internalCluster().getInstance(ClusterService.class, nodeName).localNode(); RecordingTaskManagerListener listener = new RecordingTaskManagerListener(node, Strings.splitStringToArray(actionMasks, ',')); - ((MockTaskManager)clusterService.getTaskManager()).addListener(listener); + ((MockTaskManager) internalCluster().getInstance(TransportService.class, nodeName).getTaskManager()).addListener(listener); RecordingTaskManagerListener oldListener = listeners.put(new Tuple<>(node.name(), actionMasks), listener); assertNull(oldListener); } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java index f5c99fd5f7e..813557e314b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java @@ -18,16 +18,12 @@ */ package org.elasticsearch.cluster; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.InternalClusterService; import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; import 
org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -35,38 +31,24 @@ import org.elasticsearch.common.inject.Singleton; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; -import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -85,74 +67,6 @@ public class ClusterServiceIT extends ESIntegTestCase { return pluginList(TestPlugin.class); } - public void testTimeoutUpdateTask() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class); - final CountDownLatch block = new CountDownLatch(1); - clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - try { - block.await(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - throw new RuntimeException(t); - } - }); - - final CountDownLatch timedOut = new CountDownLatch(1); - final AtomicBoolean executeCalled = new AtomicBoolean(); - clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { - @Override - public TimeValue timeout() { - return TimeValue.timeValueMillis(2); - } - - @Override - public void onFailure(String source, Throwable t) { - timedOut.countDown(); - } - - @Override - public ClusterState execute(ClusterState currentState) { - executeCalled.set(true); - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - } - }); - - timedOut.await(); - block.countDown(); - final CountDownLatch allProcessed = new CountDownLatch(1); - clusterService1.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { - @Override - public void onFailure(String source, Throwable t) { - throw new RuntimeException(t); - } - - @Override - 
public ClusterState execute(ClusterState currentState) { - allProcessed.countDown(); - return currentState; - } - - }); - allProcessed.await(); // executed another task to double check that execute on the timed out update task is not called... - assertThat(executeCalled.get(), equalTo(false)); - } - public void testAckedUpdateTask() throws Exception { Settings settings = settingsBuilder() .put("discovery.type", "local") @@ -299,63 +213,6 @@ public class ClusterServiceIT extends ESIntegTestCase { assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true)); } - - public void testMasterAwareExecution() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - - InternalTestCluster.Async master = internalCluster().startNodeAsync(settings); - InternalTestCluster.Async nonMaster = internalCluster().startNodeAsync(settingsBuilder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).build()); - master.get(); - ensureGreen(); // make sure we have a cluster - - ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nonMaster.get()); - - final boolean[] taskFailed = {false}; - final CountDownLatch latch1 = new CountDownLatch(1); - clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - latch1.countDown(); - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - taskFailed[0] = true; - latch1.countDown(); - } - }); - - latch1.await(); - assertTrue("cluster state update task was executed on a non-master", taskFailed[0]); - - taskFailed[0] = true; - final CountDownLatch latch2 = new CountDownLatch(1); - clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - taskFailed[0] = false; - latch2.countDown(); - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - taskFailed[0] = true; - latch2.countDown(); - } - }); - latch2.await(); - assertFalse("non-master cluster state update task was not executed", taskFailed[0]); - } - public void testAckedUpdateTaskNoAckExpected() throws Exception { Settings settings = settingsBuilder() .put("discovery.type", "local") @@ -715,571 +572,6 @@ public class ClusterServiceIT extends ESIntegTestCase { } } - /** - * Note, this test can only work as long as we have a single thread executor executing the state update tasks! 
- */ - public void testPrioritizedTasks() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - BlockingTask block = new BlockingTask(Priority.IMMEDIATE); - clusterService.submitStateUpdateTask("test", block); - int taskCount = randomIntBetween(5, 20); - Priority[] priorities = Priority.values(); - - // will hold all the tasks in the order in which they were executed - List tasks = new ArrayList<>(taskCount); - CountDownLatch latch = new CountDownLatch(taskCount); - for (int i = 0; i < taskCount; i++) { - Priority priority = priorities[randomIntBetween(0, priorities.length - 1)]; - clusterService.submitStateUpdateTask("test", new PrioritizedTask(priority, latch, tasks)); - } - - block.release(); - latch.await(); - - Priority prevPriority = null; - for (PrioritizedTask task : tasks) { - if (prevPriority == null) { - prevPriority = task.priority(); - } else { - assertThat(task.priority().sameOrAfter(prevPriority), is(true)); - } - } - } - - /* - * test that a listener throwing an exception while handling a - * notification does not prevent publication notification to the - * executor - */ - public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws InterruptedException { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - - final CountDownLatch latch = new CountDownLatch(1); - AtomicBoolean published = new AtomicBoolean(); - - clusterService.submitStateUpdateTask( - "testClusterStateTaskListenerThrowingExceptionIsOkay", - new Object(), - ClusterStateTaskConfig.build(Priority.NORMAL), - new ClusterStateTaskExecutor() { - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - ClusterState newClusterState = ClusterState.builder(currentState).build(); - return BatchResult.builder().successes(tasks).build(newClusterState); - } - - @Override - public void clusterStatePublished(ClusterState newClusterState) { - published.set(true); - latch.countDown(); - } - }, - new ClusterStateTaskListener() { - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - throw new IllegalStateException(source); - } - - @Override - public void onFailure(String source, Throwable t) { - } - } - ); - - latch.await(); - assertTrue(published.get()); - } - - // test that for a single thread, tasks are executed in the order - // that they are submitted - public void testClusterStateUpdateTasksAreExecutedInOrder() throws BrokenBarrierException, InterruptedException { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - - class TaskExecutor implements ClusterStateTaskExecutor { - List tasks = new ArrayList<>(); - - @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - this.tasks.addAll(tasks); - return BatchResult.builder().successes(tasks).build(ClusterState.builder(currentState).build()); - } - - @Override - public boolean runOnlyOnMaster() { - return false; - } - } - - int numberOfThreads = 
randomIntBetween(2, 8); - TaskExecutor[] executors = new TaskExecutor[numberOfThreads]; - for (int i = 0; i < numberOfThreads; i++) { - executors[i] = new TaskExecutor(); - } - - int tasksSubmittedPerThread = randomIntBetween(2, 1024); - - CopyOnWriteArrayList> failures = new CopyOnWriteArrayList<>(); - CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); - - ClusterStateTaskListener listener = new ClusterStateTaskListener() { - @Override - public void onFailure(String source, Throwable t) { - logger.error("unexpected failure: [{}]", t, source); - failures.add(new Tuple<>(source, t)); - updateLatch.countDown(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - updateLatch.countDown(); - } - }; - - CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); - - for (int i = 0; i < numberOfThreads; i++) { - final int index = i; - Thread thread = new Thread(() -> { - try { - barrier.await(); - for (int j = 0; j < tasksSubmittedPerThread; j++) { - clusterService.submitStateUpdateTask("[" + index + "][" + j + "]", j, ClusterStateTaskConfig.build(randomFrom(Priority.values())), executors[index], listener); - } - barrier.await(); - } catch (InterruptedException | BrokenBarrierException e) { - throw new AssertionError(e); - } - }); - thread.start(); - } - - // wait for all threads to be ready - barrier.await(); - // wait for all threads to finish - barrier.await(); - - updateLatch.await(); - - assertThat(failures, empty()); - - for (int i = 0; i < numberOfThreads; i++) { - assertEquals(tasksSubmittedPerThread, executors[i].tasks.size()); - for (int j = 0; j < tasksSubmittedPerThread; j++) { - assertNotNull(executors[i].tasks.get(j)); - assertEquals("cluster state update task executed out of order", j, (int)executors[i].tasks.get(j)); - } - } - } - - public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - - AtomicInteger counter = new AtomicInteger(); - class Task { - private AtomicBoolean state = new AtomicBoolean(); - - public void execute() { - if (!state.compareAndSet(false, true)) { - throw new IllegalStateException(); - } else { - counter.incrementAndGet(); - } - } - } - - int numberOfThreads = randomIntBetween(2, 8); - int tasksSubmittedPerThread = randomIntBetween(1, 1024); - int numberOfExecutors = Math.max(1, numberOfThreads / 4); - final Semaphore semaphore = new Semaphore(numberOfExecutors); - - class TaskExecutor implements ClusterStateTaskExecutor { - private AtomicInteger counter = new AtomicInteger(); - private AtomicInteger batches = new AtomicInteger(); - private AtomicInteger published = new AtomicInteger(); - - @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - tasks.forEach(task -> task.execute()); - counter.addAndGet(tasks.size()); - ClusterState maybeUpdatedClusterState = currentState; - if (randomBoolean()) { - maybeUpdatedClusterState = ClusterState.builder(currentState).build(); - batches.incrementAndGet(); - semaphore.acquire(); - } - return BatchResult.builder().successes(tasks).build(maybeUpdatedClusterState); - } - - @Override - public boolean runOnlyOnMaster() { - return false; - } - - @Override - public void clusterStatePublished(ClusterState 
newClusterState) { - published.incrementAndGet(); - semaphore.release(); - } - } - - ConcurrentMap counters = new ConcurrentHashMap<>(); - CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); - ClusterStateTaskListener listener = new ClusterStateTaskListener() { - @Override - public void onFailure(String source, Throwable t) { - assert false; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - counters.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet(); - updateLatch.countDown(); - } - }; - - List executors = new ArrayList<>(); - for (int i = 0; i < numberOfExecutors; i++) { - executors.add(new TaskExecutor()); - } - - // randomly assign tasks to executors - List assignments = new ArrayList<>(); - for (int i = 0; i < numberOfThreads; i++) { - for (int j = 0; j < tasksSubmittedPerThread; j++) { - assignments.add(randomFrom(executors)); - } - } - - Map counts = new HashMap<>(); - for (TaskExecutor executor : assignments) { - counts.merge(executor, 1, (previous, one) -> previous + one); - } - - CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); - for (int i = 0; i < numberOfThreads; i++) { - final int index = i; - Thread thread = new Thread(() -> { - try { - barrier.await(); - for (int j = 0; j < tasksSubmittedPerThread; j++) { - ClusterStateTaskExecutor executor = assignments.get(index * tasksSubmittedPerThread + j); - clusterService.submitStateUpdateTask( - Thread.currentThread().getName(), - new Task(), - ClusterStateTaskConfig.build(randomFrom(Priority.values())), - executor, - listener); - } - barrier.await(); - } catch (BrokenBarrierException | InterruptedException e) { - throw new AssertionError(e); - } - }); - thread.start(); - } - - // wait for all threads to be ready - barrier.await(); - // wait for all threads to finish - barrier.await(); - - // wait until all the cluster state updates have been processed - updateLatch.await(); - // and until all of the publication callbacks have completed - semaphore.acquire(numberOfExecutors); - - // assert the number of executed tasks is correct - assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get()); - - // assert each executor executed the correct number of tasks - for (TaskExecutor executor : executors) { - if (counts.containsKey(executor)) { - assertEquals((int) counts.get(executor), executor.counter.get()); - assertEquals(executor.batches.get(), executor.published.get()); - } - } - - // assert the correct number of clusterStateProcessed events were triggered - for (Map.Entry entry : counters.entrySet()) { - assertEquals(entry.getValue().get(), tasksSubmittedPerThread); - } - } - - @TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level - public void testClusterStateUpdateLogging() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class); - MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG, "*processing [test1]: took * no change in cluster_state")); - mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE, "*failed to execute cluster state update in *")); - mockAppender.addExpectation(new 
MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG, "*processing [test3]: took * done applying updated cluster_state (version: *, uuid: *)")); - - Logger rootLogger = Logger.getRootLogger(); - rootLogger.addAppender(mockAppender); - try { - final CountDownLatch latch = new CountDownLatch(4); - clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - fail(); - } - - @Override - public void onFailure(String source, Throwable t) { - latch.countDown(); - } - }); - clusterService1.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - return ClusterState.builder(currentState).incrementVersion().build(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - // Additional update task to make sure all previous logging made it to the logger - // We don't check logging for this on since there is no guarantee that it will occur before our check - clusterService1.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true)); - } finally { - rootLogger.removeAppender(mockAppender); - } - mockAppender.assertAllExpectationsMatched(); - } - - @TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level - public void testLongClusterStateUpdateLogging() throws Exception { - Settings settings = settingsBuilder() - .put("discovery.type", "local") - .put(InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey(), "10s") - .build(); - internalCluster().startNode(settings); - ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class); - MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low", "cluster.service", Level.WARN, "*cluster state update task [test1] took * above the warn threshold of *")); - mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN, "*cluster state update task [test2] took * above the warn threshold of 10ms")); - mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN, "*cluster state update task [test3] took * above 
the warn threshold of 10ms")); - mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN, "*cluster state update task [test4] took * above the warn threshold of 10ms")); - - Logger rootLogger = Logger.getRootLogger(); - rootLogger.addAppender(mockAppender); - try { - final CountDownLatch latch = new CountDownLatch(5); - final CountDownLatch processedFirstTask = new CountDownLatch(1); - clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - Thread.sleep(100); - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - processedFirstTask.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - - processedFirstTask.await(1, TimeUnit.SECONDS); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() - .put(InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey(), "10ms"))); - - clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - Thread.sleep(100); - throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - fail(); - } - - @Override - public void onFailure(String source, Throwable t) { - latch.countDown(); - } - }); - clusterService1.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - Thread.sleep(100); - return ClusterState.builder(currentState).incrementVersion().build(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - clusterService1.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - Thread.sleep(100); - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - // Additional update task to make sure all previous logging made it to the logger - // We don't check logging for this on since there is no guarantee that it will occur before our check - clusterService1.submitStateUpdateTask("test5", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - return currentState; - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - latch.countDown(); - } - - @Override - public void onFailure(String source, Throwable t) { - fail(); - } - }); - assertThat(latch.await(5, TimeUnit.SECONDS), equalTo(true)); - } finally { - rootLogger.removeAppender(mockAppender); - } - mockAppender.assertAllExpectationsMatched(); - } - - private static class BlockingTask extends ClusterStateUpdateTask { - private final CountDownLatch latch = new CountDownLatch(1); - - public 
BlockingTask(Priority priority) { - super(priority); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - latch.await(); - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - } - - public void release() { - latch.countDown(); - } - - } - - private static class PrioritizedTask extends ClusterStateUpdateTask { - - private final CountDownLatch latch; - private final List tasks; - - private PrioritizedTask(Priority priority, CountDownLatch latch, List tasks) { - super(priority); - this.latch = latch; - this.tasks = tasks; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - tasks.add(this); - latch.countDown(); - return currentState; - } - - @Override - public void onFailure(String source, Throwable t) { - latch.countDown(); - } - } - public static class TestPlugin extends Plugin { @Override diff --git a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java new file mode 100644 index 00000000000..84c9e9f07a0 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -0,0 +1,275 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.component.LifecycleListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.TransportServiceAdapter; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.equalTo; + +public class NodeConnectionsServiceTests extends ESTestCase { + + private static ThreadPool THREAD_POOL; + private MockTransport transport; + private TransportService transportService; + + private List generateNodes() { + List nodes = new ArrayList<>(); + for (int i = randomIntBetween(20, 50); i > 0; i--) { + final HashMap attributes = new HashMap<>(); + if (rarely()) { + attributes.put("client", "true"); + } else { + attributes.put("master", "" + randomBoolean()); + attributes.put("data", "" + randomBoolean()); + attributes.put("ingest", "" + randomBoolean()); + } + nodes.add(new DiscoveryNode("node_" + i, "" + i, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT)); + } + return nodes; + } + + private ClusterState clusterStateFromNodes(List nodes) { + final DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (DiscoveryNode node : nodes) { + builder.put(node); + } + return ClusterState.builder(new ClusterName("test")).nodes(builder).build(); + } + + public void testConnectAndDisconnect() { + List nodes = generateNodes(); + NodeConnectionsService service = new NodeConnectionsService(Settings.EMPTY, THREAD_POOL, transportService); + + ClusterState current = clusterStateFromNodes(Collections.emptyList()); + ClusterChangedEvent event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current); + + service.connectToAddedNodes(event); + assertConnected(event.nodesDelta().addedNodes()); + + service.disconnectFromRemovedNodes(event); + assertConnectedExactlyToNodes(event.state()); + + current = event.state(); + event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current); + + service.connectToAddedNodes(event); + assertConnected(event.nodesDelta().addedNodes()); + + service.disconnectFromRemovedNodes(event); + assertConnectedExactlyToNodes(event.state()); + } + + + public void testReconnect() { + List nodes = generateNodes(); + NodeConnectionsService service = new NodeConnectionsService(Settings.EMPTY, THREAD_POOL, transportService); + + ClusterState current = 
clusterStateFromNodes(Collections.emptyList()); + ClusterChangedEvent event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current); + + transport.randomConnectionExceptions = true; + + service.connectToAddedNodes(event); + + for (int i = 0; i < 3; i++) { + // simulate disconnects + for (DiscoveryNode node : randomSubsetOf(nodes)) { + transport.disconnectFromNode(node); + } + service.new ConnectionChecker().run(); + } + + // disable exceptions so things can be restored + transport.randomConnectionExceptions = false; + service.new ConnectionChecker().run(); + assertConnectedExactlyToNodes(event.state()); + } + + private void assertConnectedExactlyToNodes(ClusterState state) { + assertConnected(state.nodes()); + assertThat(transport.connectedNodes.size(), equalTo(state.nodes().size())); + } + + private void assertConnected(Iterable nodes) { + for (DiscoveryNode node : nodes) { + assertTrue("not connected to " + node, transport.connectedNodes.contains(node)); + } + } + + private void assertNotConnected(Iterable nodes) { + for (DiscoveryNode node : nodes) { + assertFalse("still connected to " + node, transport.connectedNodes.contains(node)); + } + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + this.transport = new MockTransport(); + transportService = new TransportService(transport, THREAD_POOL); + transportService.start(); + transportService.acceptIncomingRequests(); + } + + @Override + @After + public void tearDown() throws Exception { + transportService.stop(); + super.tearDown(); + } + + @AfterClass + public static void stopThreadPool() { + ThreadPool.terminate(THREAD_POOL, 30, TimeUnit.SECONDS); + THREAD_POOL = null; + } + + + final class MockTransport implements Transport { + + Set connectedNodes = ConcurrentCollections.newConcurrentSet(); + volatile boolean randomConnectionExceptions = false; + + @Override + public void transportServiceAdapter(TransportServiceAdapter service) { + + } + + @Override + public BoundTransportAddress boundAddress() { + return null; + } + + @Override + public Map profileBoundAddresses() { + return null; + } + + @Override + public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception { + return new TransportAddress[0]; + } + + @Override + public boolean addressSupported(Class address) { + return false; + } + + @Override + public boolean nodeConnected(DiscoveryNode node) { + return connectedNodes.contains(node); + } + + @Override + public void connectToNode(DiscoveryNode node) throws ConnectTransportException { + if (connectedNodes.contains(node) == false && randomConnectionExceptions && randomBoolean()) { + throw new ConnectTransportException(node, "simulated"); + } + connectedNodes.add(node); + } + + @Override + public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException { + + } + + @Override + public void disconnectFromNode(DiscoveryNode node) { + connectedNodes.remove(node); + } + + @Override + public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, + TransportRequestOptions options) throws IOException, TransportException { + + } + + @Override + public long serverOpen() { + return 0; + } + + @Override + public List getLocalAddresses() { + return null; + } + + @Override + public Lifecycle.State lifecycleState() { + return null; + } + + @Override + public void addLifecycleListener(LifecycleListener listener) { + + } + + @Override + public void 
removeLifecycleListener(LifecycleListener listener) { + + } + + @Override + public Transport start() { + return null; + } + + @Override + public Transport stop() { + return null; + } + + @Override + public void close() { + + } + } +} \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java index 6339c700eec..29ce8e7a636 100644 --- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -40,7 +41,6 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.cluster.service.InternalClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.test.ESAllocationTestCase; @@ -305,7 +305,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa return randomSubsetOf(1, shards.toArray(new ShardRouting[0])).get(0); } else { return - TestShardRouting.newShardRouting(shardRouting.index(), shardRouting.id(), InternalClusterService.generateNodeId(Settings.EMPTY), randomBoolean(), randomFrom(ShardRoutingState.values())); + TestShardRouting.newShardRouting(shardRouting.index(), shardRouting.id(), DiscoveryNodeService.generateNodeId(Settings.EMPTY), randomBoolean(), randomFrom(ShardRoutingState.values())); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java new file mode 100644 index 00000000000..ff55de45649 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java @@ -0,0 +1,824 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.cluster.service; + +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.NodeConnectionsService; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.OperationRouting; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class ClusterServiceTests extends ESTestCase { + + static ThreadPool threadPool; + TimedClusterService clusterService; + + @BeforeClass + public static void createThreadPool() { + threadPool = new ThreadPool(ClusterServiceTests.class.getName()); + } + + @AfterClass + public static void stopThreadPool() { + if (threadPool != null) { + threadPool.shutdownNow(); + threadPool = null; + } + } + + @Before + public void setUp() throws Exception { + super.setUp(); + clusterService = createClusterService(true); + } + + @After + public void tearDown() throws Exception { + clusterService.close(); + super.tearDown(); + } + + TimedClusterService createClusterService(boolean makeMaster) throws InterruptedException { + TimedClusterService test = new TimedClusterService(Settings.EMPTY, null, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool, new ClusterName("ClusterServiceTests")); + test.setLocalNode(new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT)); + test.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { + @Override + public void connectToAddedNodes(ClusterChangedEvent event) { + // skip + } + + @Override + public void disconnectFromRemovedNodes(ClusterChangedEvent event) { + // skip + } + }); 
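+        // publish cluster states to a no-op publisher; these tests exercise only the local update task machinery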
+ test.setClusterStatePublisher((event, ackListener) -> { + }); + test.start(); + CountDownLatch latch = new CountDownLatch(1); + test.submitStateUpdateTask("making a master", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + final DiscoveryNodes nodes = currentState.nodes(); + final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes) + .masterNodeId(makeMaster ? nodes.localNodeId() : null); + return ClusterState.builder(currentState).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build(); + } + + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + logger.warn("unexpected exception", t); + fail("unexpected exception" + t); + } + }); + latch.await(); + return test; + } + + public void testTimeoutUpdateTask() throws Exception { + final CountDownLatch block = new CountDownLatch(1); + clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + try { + block.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + throw new RuntimeException(t); + } + }); + + final CountDownLatch timedOut = new CountDownLatch(1); + final AtomicBoolean executeCalled = new AtomicBoolean(); + clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + @Override + public TimeValue timeout() { + return TimeValue.timeValueMillis(2); + } + + @Override + public void onFailure(String source, Throwable t) { + timedOut.countDown(); + } + + @Override + public ClusterState execute(ClusterState currentState) { + executeCalled.set(true); + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + } + }); + + timedOut.await(); + block.countDown(); + final CountDownLatch allProcessed = new CountDownLatch(1); + clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + @Override + public void onFailure(String source, Throwable t) { + throw new RuntimeException(t); + } + + @Override + public ClusterState execute(ClusterState currentState) { + allProcessed.countDown(); + return currentState; + } + + }); + allProcessed.await(); // executed another task to double check that execute on the timed out update task is not called... 
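+        // the timed out task was removed from the pending queue, so its execute method must never have run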
+ assertThat(executeCalled.get(), equalTo(false)); + } + + + public void testMasterAwareExecution() throws Exception { + ClusterService nonMaster = createClusterService(false); + + final boolean[] taskFailed = {false}; + final CountDownLatch latch1 = new CountDownLatch(1); + nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + latch1.countDown(); + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + taskFailed[0] = true; + latch1.countDown(); + } + }); + + latch1.await(); + assertTrue("cluster state update task was executed on a non-master", taskFailed[0]); + + taskFailed[0] = true; + final CountDownLatch latch2 = new CountDownLatch(1); + nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + taskFailed[0] = false; + latch2.countDown(); + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + taskFailed[0] = true; + latch2.countDown(); + } + }); + latch2.await(); + assertFalse("non-master cluster state update task was not executed", taskFailed[0]); + + nonMaster.close(); + } + + /* + * test that a listener throwing an exception while handling a + * notification does not prevent publication notification to the + * executor + */ + public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean published = new AtomicBoolean(); + + clusterService.submitStateUpdateTask( + "testClusterStateTaskListenerThrowingExceptionIsOkay", + new Object(), + ClusterStateTaskConfig.build(Priority.NORMAL), + new ClusterStateTaskExecutor() { + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + ClusterState newClusterState = ClusterState.builder(currentState).build(); + return BatchResult.builder().successes(tasks).build(newClusterState); + } + + @Override + public void clusterStatePublished(ClusterState newClusterState) { + published.set(true); + latch.countDown(); + } + }, + new ClusterStateTaskListener() { + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + throw new IllegalStateException(source); + } + + @Override + public void onFailure(String source, Throwable t) { + } + } + ); + + latch.await(); + assertTrue(published.get()); + } + + // test that for a single thread, tasks are executed in the order + // that they are submitted + public void testClusterStateUpdateTasksAreExecutedInOrder() throws BrokenBarrierException, InterruptedException { + class TaskExecutor implements ClusterStateTaskExecutor { + List tasks = new ArrayList<>(); + + @Override + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + this.tasks.addAll(tasks); + return BatchResult.builder().successes(tasks).build(ClusterState.builder(currentState).build()); + } + + @Override + public boolean runOnlyOnMaster() { + return false; + } + } + + int numberOfThreads = randomIntBetween(2, 8); + TaskExecutor[] executors = new TaskExecutor[numberOfThreads]; + for (int i = 0; i < numberOfThreads; i++) { + executors[i] = new TaskExecutor(); + } + + int tasksSubmittedPerThread = 
randomIntBetween(2, 1024); + + CopyOnWriteArrayList> failures = new CopyOnWriteArrayList<>(); + CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); + + ClusterStateTaskListener listener = new ClusterStateTaskListener() { + @Override + public void onFailure(String source, Throwable t) { + logger.error("unexpected failure: [{}]", t, source); + failures.add(new Tuple<>(source, t)); + updateLatch.countDown(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + updateLatch.countDown(); + } + }; + + CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); + + for (int i = 0; i < numberOfThreads; i++) { + final int index = i; + Thread thread = new Thread(() -> { + try { + barrier.await(); + for (int j = 0; j < tasksSubmittedPerThread; j++) { + clusterService.submitStateUpdateTask("[" + index + "][" + j + "]", j, + ClusterStateTaskConfig.build(randomFrom(Priority.values())), executors[index], listener); + } + barrier.await(); + } catch (InterruptedException | BrokenBarrierException e) { + throw new AssertionError(e); + } + }); + thread.start(); + } + + // wait for all threads to be ready + barrier.await(); + // wait for all threads to finish + barrier.await(); + + updateLatch.await(); + + assertThat(failures, empty()); + + for (int i = 0; i < numberOfThreads; i++) { + assertEquals(tasksSubmittedPerThread, executors[i].tasks.size()); + for (int j = 0; j < tasksSubmittedPerThread; j++) { + assertNotNull(executors[i].tasks.get(j)); + assertEquals("cluster state update task executed out of order", j, (int) executors[i].tasks.get(j)); + } + } + } + + public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException { + AtomicInteger counter = new AtomicInteger(); + class Task { + private AtomicBoolean state = new AtomicBoolean(); + + public void execute() { + if (!state.compareAndSet(false, true)) { + throw new IllegalStateException(); + } else { + counter.incrementAndGet(); + } + } + } + + int numberOfThreads = randomIntBetween(2, 8); + int tasksSubmittedPerThread = randomIntBetween(1, 1024); + int numberOfExecutors = Math.max(1, numberOfThreads / 4); + final Semaphore semaphore = new Semaphore(numberOfExecutors); + + class TaskExecutor implements ClusterStateTaskExecutor { + private AtomicInteger counter = new AtomicInteger(); + private AtomicInteger batches = new AtomicInteger(); + private AtomicInteger published = new AtomicInteger(); + + @Override + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + tasks.forEach(task -> task.execute()); + counter.addAndGet(tasks.size()); + ClusterState maybeUpdatedClusterState = currentState; + if (randomBoolean()) { + maybeUpdatedClusterState = ClusterState.builder(currentState).build(); + batches.incrementAndGet(); + semaphore.acquire(); + } + return BatchResult.builder().successes(tasks).build(maybeUpdatedClusterState); + } + + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public void clusterStatePublished(ClusterState newClusterState) { + published.incrementAndGet(); + semaphore.release(); + } + } + + ConcurrentMap counters = new ConcurrentHashMap<>(); + CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); + ClusterStateTaskListener listener = new ClusterStateTaskListener() { + @Override + public void onFailure(String source, Throwable t) { + assert false; + } + + @Override + public void 
clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + counters.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet(); + updateLatch.countDown(); + } + }; + + List executors = new ArrayList<>(); + for (int i = 0; i < numberOfExecutors; i++) { + executors.add(new TaskExecutor()); + } + + // randomly assign tasks to executors + List assignments = new ArrayList<>(); + for (int i = 0; i < numberOfThreads; i++) { + for (int j = 0; j < tasksSubmittedPerThread; j++) { + assignments.add(randomFrom(executors)); + } + } + + Map counts = new HashMap<>(); + for (TaskExecutor executor : assignments) { + counts.merge(executor, 1, (previous, one) -> previous + one); + } + + CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); + for (int i = 0; i < numberOfThreads; i++) { + final int index = i; + Thread thread = new Thread(() -> { + try { + barrier.await(); + for (int j = 0; j < tasksSubmittedPerThread; j++) { + ClusterStateTaskExecutor executor = assignments.get(index * tasksSubmittedPerThread + j); + clusterService.submitStateUpdateTask( + Thread.currentThread().getName(), + new Task(), + ClusterStateTaskConfig.build(randomFrom(Priority.values())), + executor, + listener); + } + barrier.await(); + } catch (BrokenBarrierException | InterruptedException e) { + throw new AssertionError(e); + } + }); + thread.start(); + } + + // wait for all threads to be ready + barrier.await(); + // wait for all threads to finish + barrier.await(); + + // wait until all the cluster state updates have been processed + updateLatch.await(); + // and until all of the publication callbacks have completed + semaphore.acquire(numberOfExecutors); + + // assert the number of executed tasks is correct + assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get()); + + // assert each executor executed the correct number of tasks + for (TaskExecutor executor : executors) { + if (counts.containsKey(executor)) { + assertEquals((int) counts.get(executor), executor.counter.get()); + assertEquals(executor.batches.get(), executor.published.get()); + } + } + + // assert the correct number of clusterStateProcessed events were triggered + for (Map.Entry entry : counters.entrySet()) { + assertEquals(entry.getValue().get(), tasksSubmittedPerThread); + } + } + + /** + * Note, this test can only work as long as we have a single thread executor executing the state update tasks! 
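+     * With more than one executor thread, tasks could complete in an order that no longer reflects their priority.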
+ */ + public void testPrioritizedTasks() throws Exception { + Settings settings = settingsBuilder() + .put("discovery.type", "local") + .build(); + BlockingTask block = new BlockingTask(Priority.IMMEDIATE); + clusterService.submitStateUpdateTask("test", block); + int taskCount = randomIntBetween(5, 20); + Priority[] priorities = Priority.values(); + + // will hold all the tasks in the order in which they were executed + List tasks = new ArrayList<>(taskCount); + CountDownLatch latch = new CountDownLatch(taskCount); + for (int i = 0; i < taskCount; i++) { + Priority priority = priorities[randomIntBetween(0, priorities.length - 1)]; + clusterService.submitStateUpdateTask("test", new PrioritizedTask(priority, latch, tasks)); + } + + block.release(); + latch.await(); + + Priority prevPriority = null; + for (PrioritizedTask task : tasks) { + if (prevPriority == null) { + prevPriority = task.priority(); + } else { + assertThat(task.priority().sameOrAfter(prevPriority), is(true)); + } + } + } + + @TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level + public void testClusterStateUpdateLogging() throws Exception { + MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG, + "*processing [test1]: took [1s] no change in cluster_state")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE, + "*failed to execute cluster state update in [2s]*")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG, + "*processing [test3]: took [3s] done applying updated cluster_state (version: *, uuid: *)")); + + Logger rootLogger = Logger.getRootLogger(); + rootLogger.addAppender(mockAppender); + try { + final CountDownLatch latch = new CountDownLatch(4); + clusterService.currentTimeOverride = System.nanoTime(); + clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos(); + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(2).nanos(); + throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + fail(); + } + + @Override + public void onFailure(String source, Throwable t) { + latch.countDown(); + } + }); + clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(3).nanos(); + return ClusterState.builder(currentState).incrementVersion().build(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + 
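+                        // test3 executes successfully, so reaching this failure callback is a bug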
fail(); + } + }); + // Additional update task to make sure all previous logging made it to the logger + // We don't check logging for this on since there is no guarantee that it will occur before our check + clusterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + latch.await(); + } finally { + rootLogger.removeAppender(mockAppender); + } + mockAppender.assertAllExpectationsMatched(); + } + + @TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level + public void testLongClusterStateUpdateLogging() throws Exception { + MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low", + "cluster.service", Level.WARN, "*cluster state update task [test1] took [*] above the warn threshold of *")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN, + "*cluster state update task [test2] took [32s] above the warn threshold of *")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN, + "*cluster state update task [test3] took [33s] above the warn threshold of *")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN, + "*cluster state update task [test4] took [34s] above the warn threshold of *")); + + Logger rootLogger = Logger.getRootLogger(); + rootLogger.addAppender(mockAppender); + try { + final CountDownLatch latch = new CountDownLatch(5); + final CountDownLatch processedFirstTask = new CountDownLatch(1); + clusterService.currentTimeOverride = System.nanoTime(); + clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos(); + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + processedFirstTask.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + + processedFirstTask.await(); + clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(32).nanos(); + throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + fail(); + } + + @Override + public void onFailure(String source, Throwable t) { + latch.countDown(); + } + }); + clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(33).nanos(); + return ClusterState.builder(currentState).incrementVersion().build(); + } + + @Override + public void 
clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + clusterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + clusterService.currentTimeOverride += TimeValue.timeValueSeconds(34).nanos(); + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + // Additional update task to make sure all previous logging made it to the logger + // We don't check logging for this on since there is no guarantee that it will occur before our check + clusterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + latch.await(); + } finally { + rootLogger.removeAppender(mockAppender); + } + mockAppender.assertAllExpectationsMatched(); + } + + private static class BlockingTask extends ClusterStateUpdateTask { + private final CountDownLatch latch = new CountDownLatch(1); + + public BlockingTask(Priority priority) { + super(priority); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + latch.await(); + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + } + + public void release() { + latch.countDown(); + } + + } + + private static class PrioritizedTask extends ClusterStateUpdateTask { + + private final CountDownLatch latch; + private final List tasks; + + private PrioritizedTask(Priority priority, CountDownLatch latch, List tasks) { + super(priority); + this.latch = latch; + this.tasks = tasks; + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + tasks.add(this); + latch.countDown(); + return currentState; + } + + @Override + public void onFailure(String source, Throwable t) { + latch.countDown(); + } + } + + static class TimedClusterService extends InternalClusterService { + + public volatile Long currentTimeOverride = null; + + public TimedClusterService(Settings settings, OperationRouting operationRouting, ClusterSettings clusterSettings, + ThreadPool threadPool, ClusterName clusterName) { + super(settings, operationRouting, clusterSettings, threadPool, clusterName); + } + + @Override + protected long currentTimeInNanos() { + if (currentTimeOverride != null) { + return currentTimeOverride; + } + return super.currentTimeInNanos(); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/test/MockLogAppender.java b/core/src/test/java/org/elasticsearch/test/MockLogAppender.java index c0866a81081..9e4a881b25b 100644 --- a/core/src/test/java/org/elasticsearch/test/MockLogAppender.java +++ b/core/src/test/java/org/elasticsearch/test/MockLogAppender.java @@ -80,7 +80,7 @@ public class MockLogAppender extends AppenderSkeleton { protected final String logger; protected final Level level; protected final String message; - protected boolean saw; + volatile boolean saw; public AbstractEventExpectation(String 
name, String logger, Level level, String message) {
             this.name = name;
diff --git a/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java b/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java
index 9581dfff42f..f9451375590 100644
--- a/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java
+++ b/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.transport.netty;
 
+import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.util.concurrent.KeyedLock;
 import org.elasticsearch.test.ESTestCase;
 import org.hamcrest.Matchers;
@@ -29,9 +30,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.not;
 
 public class KeyedLockTests extends ESTestCase {
@@ -68,28 +67,6 @@ public class KeyedLockTests extends ESTestCase {
         }
     }
 
-    public void testCannotAcquireTwoLocks() throws InterruptedException {
-        KeyedLock<String> connectionLock = new KeyedLock<>();
-        String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50));
-        connectionLock.acquire(name);
-        try {
-            connectionLock.acquire(name);
-            fail("Expected IllegalStateException");
-        } catch (IllegalStateException e) {
-            assertThat(e.getMessage(), containsString("Lock already acquired"));
-        }
-    }
-
-    public void testCannotReleaseUnacquiredLock() throws InterruptedException {
-        KeyedLock<String> connectionLock = new KeyedLock<>();
-        String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50));
-        try {
-            connectionLock.release(name);
-            fail("Expected IllegalStateException");
-        } catch (IllegalStateException e) {
-            assertThat(e.getMessage(), is("Lock not acquired"));
-        }
-    }
-
     public static class AcquireAndReleaseThread extends Thread {
         private CountDownLatch startLatch;
@@ -117,16 +94,16 @@
             int numRuns = scaledRandomIntBetween(5000, 50000);
             for (int i = 0; i < numRuns; i++) {
                 String curName = names[randomInt(names.length - 1)];
-                connectionLock.acquire(curName);
-                try {
+                assert connectionLock.isHeldByCurrentThread(curName) == false;
+                try (Releasable ignored = connectionLock.acquire(curName)) {
+                    assert connectionLock.isHeldByCurrentThread(curName);
+                    assert connectionLock.isHeldByCurrentThread(curName + "bla") == false;
                     Integer integer = counter.get(curName);
                     if (integer == null) {
                         counter.put(curName, 1);
                     } else {
                         counter.put(curName, integer.intValue() + 1);
                     }
-                } finally {
-                    connectionLock.release(curName);
                 }
                 AtomicInteger atomicInteger = new AtomicInteger(0);
                 AtomicInteger value = safeCounter.putIfAbsent(curName, atomicInteger);
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
index 9c68ea196aa..63c09890acc 100644
--- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
@@ -23,7 +23,7 @@ import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.service.InternalClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -66,14 +66,14 @@ public class TribeUnitTests extends ESTestCase { .put(baseSettings) .put("cluster.name", "tribe1") .put("node.name", "tribe1_node") - .put(InternalClusterService.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) + .put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) .build()).start(); tribe2 = new TribeClientNode( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe2") .put("node.name", "tribe2_node") - .put(InternalClusterService.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) + .put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) .build()).start(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 84d88733802..c2cb12644b0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -67,6 +67,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Random; @@ -282,10 +283,10 @@ public abstract class ESTestCase extends LuceneTestCase { * Returns a double value in the interval [start, end) if lowerInclusive is * set to true, (start, end) otherwise. * - * @param start lower bound of interval to draw uniformly distributed random numbers from - * @param end upper bound + * @param start lower bound of interval to draw uniformly distributed random numbers from + * @param end upper bound * @param lowerInclusive whether or not to include lower end of the interval - * */ + */ public static double randomDoubleBetween(double start, double end, boolean lowerInclusive) { double result = 0.0; @@ -555,12 +556,27 @@ public abstract class ESTestCase extends LuceneTestCase { * Returns size random values */ public static List randomSubsetOf(int size, T... 
values) { - if (size > values.length) { - throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a list of " + values.length + " objects"); - } List list = arrayAsArrayList(values); - Collections.shuffle(list, random()); - return list.subList(0, size); + return randomSubsetOf(size, list); + } + + /** + * Returns a random subset of values (including a potential empty list) + */ + public static List randomSubsetOf(Collection collection) { + return randomSubsetOf(randomInt(collection.size() - 1), collection); + } + + /** + * Returns size random values + */ + public static List randomSubsetOf(int size, Collection collection) { + if (size > collection.size()) { + throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a collection of " + collection.size() + " objects"); + } + List tempList = new ArrayList<>(collection); + Collections.shuffle(tempList, random()); + return tempList.subList(0, size); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 82c7db11d69..43483f17117 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -39,13 +39,13 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.elasticsearch.cluster.service.InternalClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; @@ -591,7 +591,7 @@ public final class InternalTestCluster extends TestCluster { .put(Environment.PATH_HOME_SETTING.getKey(), baseDir) // allow overriding path.home .put(settings) .put("node.name", name) - .put(InternalClusterService.NODE_ID_SEED_SETTING.getKey(), seed) + .put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), seed) .build(); MockNode node = new MockNode(finalSettings, version, plugins); return new NodeAndClient(name, node); @@ -838,8 +838,8 @@ public final class InternalTestCluster extends TestCluster { IOUtils.rm(nodeEnv.nodeDataPaths()); } } - final long newIdSeed = InternalClusterService.NODE_ID_SEED_SETTING.get(node.settings()) + 1; // use a new seed to make sure we have new node id - Settings finalSettings = Settings.builder().put(node.settings()).put(newSettings).put(InternalClusterService.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build(); + final long newIdSeed = DiscoveryNodeService.NODE_ID_SEED_SETTING.get(node.settings()) + 1; // use a new seed to make sure we have new node id + Settings finalSettings = Settings.builder().put(node.settings()).put(newSettings).put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build(); Collection> plugins = node.getPlugins(); Version version = node.getVersion(); node = new MockNode(finalSettings, version, 
plugins); diff --git a/test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java b/test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java index 99ba809c144..ad73a097c1e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java @@ -38,7 +38,6 @@ import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.tasks.TaskManager; import java.util.List; @@ -153,11 +152,6 @@ public class NoopClusterService implements ClusterService { return TimeValue.timeValueMillis(0); } - @Override - public TaskManager getTaskManager() { - return null; - } - @Override public Lifecycle.State lifecycleState() { return null; diff --git a/test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java b/test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java index 3b1082cae44..ebae5cc9947 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java @@ -47,9 +47,7 @@ import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; import java.util.Arrays; import java.util.Iterator; @@ -62,7 +60,6 @@ import java.util.concurrent.ScheduledFuture; public class TestClusterService implements ClusterService { volatile ClusterState state; - private volatile TaskManager taskManager; private final List listeners = new CopyOnWriteArrayList<>(); private final Queue onGoingTimeouts = ConcurrentCollections.newQueue(); private final ThreadPool threadPool; @@ -75,12 +72,6 @@ public class TestClusterService implements ClusterService { public TestClusterService(ThreadPool threadPool) { this(ClusterState.builder(new ClusterName("test")).build(), threadPool); - taskManager = new TaskManager(Settings.EMPTY); - } - - public TestClusterService(ThreadPool threadPool, TransportService transportService) { - this(ClusterState.builder(new ClusterName("test")).build(), threadPool); - taskManager = transportService.getTaskManager(); } public TestClusterService(ClusterState state) { @@ -243,11 +234,6 @@ public class TestClusterService implements ClusterService { throw new UnsupportedOperationException(); } - @Override - public TaskManager getTaskManager() { - return taskManager; - } - @Override public List pendingTasks() { throw new UnsupportedOperationException(); From f9622f9acc86bee251b8b083aa34bff8384b9fbd Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 10 Mar 2016 12:33:14 +0100 Subject: [PATCH 27/37] Docs: Added a note about the update API not supporting external versioning Closes #12820 --- docs/reference/docs/update.asciidoc | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 634bc23d6ac..35dbccf7aa2 100644 --- a/docs/reference/docs/update.asciidoc +++ 
b/docs/reference/docs/update.asciidoc @@ -251,5 +251,15 @@ sure the document doesn't change during the update. You can use the `version` parameter to specify that the document should only be updated if its version matches the one specified. By setting version type to `force` you can force the new version of the document after update (use with care! with `force` -there is no guarantee the document didn't change).Version types `external` & -`external_gte` are not supported. +there is no guarantee the document didn't change). + +[NOTE] +.The update API does not support external versioning +===================================================== + +External versioning (version types `external` & `external_gte`) is not +supported by the update API as it would result in Elasticsearch version +numbers being out of sync with the external system. Use the +<> instead. + +===================================================== From a8c7ae78094780bacc53e17763620ed7c0a8ab11 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 10 Mar 2016 13:08:29 +0100 Subject: [PATCH 28/37] Fixed bad docs link --- docs/reference/docs/update.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 35dbccf7aa2..316714259e0 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -260,6 +260,6 @@ there is no guarantee the document didn't change). External versioning (version types `external` & `external_gte`) is not supported by the update API as it would result in Elasticsearch version numbers being out of sync with the external system. Use the -<> instead. +<> instead. ===================================================== From 9f923255878b7baefd89bc37af8fe3072f163322 Mon Sep 17 00:00:00 2001 From: jaymode Date: Thu, 10 Mar 2016 07:16:37 -0500 Subject: [PATCH 29/37] Allow additional settings for the node in ESSingleNodeTestCase This change adds a method that extending classes can override to provide additional settings for the node used in a single node test case. --- .../java/org/elasticsearch/test/ESSingleNodeTestCase.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 6e16d60eafc..57dfc106845 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -160,6 +159,11 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { return Arrays.asList(plugins); } + /** Additional settings to add when creating the node. Also allows overriding the default settings. 
*/ + protected Settings nodeSettings() { + return Settings.EMPTY; + } + private Node newNode() { Settings settings = Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong())) @@ -177,6 +181,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { .put(Node.NODE_LOCAL_SETTING.getKey(), true) .put(Node.NODE_DATA_SETTING.getKey(), true) .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true) // make sure we get what we set :) + .put(nodeSettings()) // allow test cases to provide their own settings or override these + .build(); Node build = new MockNode(settings, getVersion(), getPlugins()); build.start(); From 2fa33d5c47d292fdcd15c7acbefe2a579c3e9a38 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 2 Mar 2016 17:57:45 +0100 Subject: [PATCH 30/37] Added ingest statistics to node stats API The ingest stats include the following statistics: * `ingest.total.count` - The total number of documents ingested during the lifetime of this node * `ingest.total.time_in_millis` - The total time spent on ingest preprocessing documents during the lifetime of this node * `ingest.total.current` - The total number of documents currently being ingested. * `ingest.total.failed` - The total number of ingest preprocessing operations that failed during the lifetime of this node These stats are also returned on a per-pipeline basis. --- .../admin/cluster/node/stats/NodeStats.java | 20 +- .../cluster/node/stats/NodesStatsRequest.java | 16 ++ .../node/stats/NodesStatsRequestBuilder.java | 8 + .../node/stats/TransportNodesStatsAction.java | 3 +- .../stats/TransportClusterStatsAction.java | 2 +- .../action/ingest/IngestActionFilter.java | 2 +- .../common/io/stream/StreamInput.java | 9 + .../common/io/stream/StreamOutput.java | 9 + .../org/elasticsearch/ingest/IngestStats.java | 171 ++++++++++++++++++ .../ingest/PipelineExecutionService.java | 141 ++++++++++++--- .../node/service/NodeService.java | 9 +- .../node/stats/RestNodesStatsAction.java | 3 +- .../elasticsearch/cluster/DiskUsageTests.java | 12 +- .../ingest/PipelineExecutionServiceTests.java | 41 ++++- docs/reference/cluster/nodes-stats.asciidoc | 23 +++ docs/reference/ingest/ingest-node.asciidoc | 3 + .../rest-api-spec/test/ingest/70_bulk.yaml | 34 ++++ .../MockInternalClusterInfoService.java | 2 +- .../test/InternalTestCluster.java | 2 +- 19 files changed, 469 insertions(+), 41 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/ingest/IngestStats.java diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index a4cf2b1de2a..c1d4bb78ba3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -31,6 +31,7 @@ import org.elasticsearch.discovery.DiscoveryStats; import org.elasticsearch.http.HttpStats; import org.elasticsearch.indices.NodeIndicesStats; import org.elasticsearch.indices.breaker.AllCircuitBreakerStats; +import org.elasticsearch.ingest.IngestStats; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.jvm.JvmStats; import org.elasticsearch.monitor.os.OsStats; @@ -81,6 +82,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { @Nullable private DiscoveryStats discoveryStats; + @Nullable + private IngestStats ingestStats; +
NodeStats() { } @@ -89,7 +93,8 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { @Nullable FsInfo fs, @Nullable TransportStats transport, @Nullable HttpStats http, @Nullable AllCircuitBreakerStats breaker, @Nullable ScriptStats scriptStats, - @Nullable DiscoveryStats discoveryStats) { + @Nullable DiscoveryStats discoveryStats, + @Nullable IngestStats ingestStats) { super(node); this.timestamp = timestamp; this.indices = indices; @@ -103,6 +108,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { this.breaker = breaker; this.scriptStats = scriptStats; this.discoveryStats = discoveryStats; + this.ingestStats = ingestStats; } public long getTimestamp() { @@ -187,6 +193,11 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { return this.discoveryStats; } + @Nullable + public IngestStats getIngestStats() { + return ingestStats; + } + public static NodeStats readNodeStats(StreamInput in) throws IOException { NodeStats nodeInfo = new NodeStats(); nodeInfo.readFrom(in); @@ -224,7 +235,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in); scriptStats = in.readOptionalStreamable(ScriptStats::new); discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null)); - + ingestStats = in.readOptionalWritable(IngestStats.PROTO); } @Override @@ -282,6 +293,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { out.writeOptionalStreamable(breaker); out.writeOptionalStreamable(scriptStats); out.writeOptionalStreamable(discoveryStats); + out.writeOptionalWriteable(ingestStats); } @Override @@ -337,6 +349,10 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { getDiscoveryStats().toXContent(builder, params); } + if (getIngestStats() != null) { + getIngestStats().toXContent(builder, params); + } + return builder; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 5916421c1ed..88162a617a8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -42,6 +42,7 @@ public class NodesStatsRequest extends BaseNodesRequest { private boolean breaker; private boolean script; private boolean discovery; + private boolean ingest; public NodesStatsRequest() { } @@ -69,6 +70,7 @@ public class NodesStatsRequest extends BaseNodesRequest { this.breaker = true; this.script = true; this.discovery = true; + this.ingest = true; return this; } @@ -87,6 +89,7 @@ public class NodesStatsRequest extends BaseNodesRequest { this.breaker = false; this.script = false; this.discovery = false; + this.ingest = false; return this; } @@ -250,6 +253,17 @@ public class NodesStatsRequest extends BaseNodesRequest { return this; } + public boolean ingest() { + return ingest; + } + + /** + * Should ingest statistics be returned. 
+ */ + public NodesStatsRequest ingest(boolean ingest) { + this.ingest = ingest; + return this; + } @Override public void readFrom(StreamInput in) throws IOException { @@ -265,6 +279,7 @@ public class NodesStatsRequest extends BaseNodesRequest { breaker = in.readBoolean(); script = in.readBoolean(); discovery = in.readBoolean(); + ingest = in.readBoolean(); } @Override @@ -281,6 +296,7 @@ public class NodesStatsRequest extends BaseNodesRequest { out.writeBoolean(breaker); out.writeBoolean(script); out.writeBoolean(discovery); + out.writeBoolean(ingest); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index dc35eefee7d..027e6122681 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -137,4 +137,12 @@ public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder shardsStats = new ArrayList<>(); for (IndexService indexService : indicesService) { for (IndexShard indexShard : indexService) { diff --git a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java index 75555ccff7c..1eb9337c814 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java @@ -112,7 +112,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio logger.error("failed to execute pipeline for a bulk request", throwable); listener.onFailure(throwable); } else { - long ingestTookInMillis = TimeUnit.MILLISECONDS.convert(System.nanoTime() - ingestStartTimeInNanos, TimeUnit.NANOSECONDS); + long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos); BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest(); ActionListener actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener); if (bulkRequest.requests().isEmpty()) { diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 8eda42ae9be..e84766d021b 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; +import org.elasticsearch.ingest.IngestStats; import org.elasticsearch.search.rescore.RescoreBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.search.aggregations.AggregatorBuilder; @@ -552,6 +553,14 @@ public abstract class StreamInput extends InputStream { } } + public T readOptionalWritable(T prototype) throws IOException { + if (readBoolean()) { + return (T) prototype.readFrom(this); + } else { + return null; + } + } + public T readThrowable() throws IOException { if (readBoolean()) { int key = readVInt(); diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java 
b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 864da006bf0..0251e1e74be 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -520,6 +520,15 @@ public abstract class StreamOutput extends OutputStream { } } + public void writeOptionalWriteable(@Nullable Writeable writeable) throws IOException { + if (writeable != null) { + writeBoolean(true); + writeable.writeTo(this); + } else { + writeBoolean(false); + } + } + public void writeThrowable(Throwable throwable) throws IOException { if (throwable == null) { writeBoolean(false); diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestStats.java b/core/src/main/java/org/elasticsearch/ingest/IngestStats.java new file mode 100644 index 00000000000..a59ddce4fec --- /dev/null +++ b/core/src/main/java/org/elasticsearch/ingest/IngestStats.java @@ -0,0 +1,171 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; +public class IngestStats implements Writeable, ToXContent { + + public final static IngestStats PROTO = new IngestStats(null, null); + + private final Stats totalStats; + private final Map statsPerPipeline; + + public IngestStats(Stats totalStats, Map statsPerPipeline) { + this.totalStats = totalStats; + this.statsPerPipeline = statsPerPipeline; + } + + /** + * @return The accumulated stats for all pipelines + */ + public Stats getTotalStats() { + return totalStats; + } + + /** + * @return The stats on a per-pipeline basis + */ + public Map getStatsPerPipeline() { + return statsPerPipeline; + } + + @Override + public IngestStats readFrom(StreamInput in) throws IOException { + Stats totalStats = Stats.PROTO.readFrom(in); + int size = in.readVInt(); + Map statsPerPipeline = new HashMap<>(size); + for (int i = 0; i < size; i++) { + // entries are written as the pipeline id followed by its stats, + // so they must be read back in the same order + String pipelineId = in.readString(); + statsPerPipeline.put(pipelineId, Stats.PROTO.readFrom(in)); + } + return new IngestStats(totalStats, statsPerPipeline); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + totalStats.writeTo(out); + out.writeVInt(statsPerPipeline.size()); + for (Map.Entry entry : statsPerPipeline.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + } + + @Override + public
XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("ingest"); + builder.startObject("total"); + totalStats.toXContent(builder, params); + builder.endObject(); + builder.startObject("pipelines"); + for (Map.Entry entry : statsPerPipeline.entrySet()) { + builder.startObject(entry.getKey()); + entry.getValue().toXContent(builder, params); + builder.endObject(); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + public static class Stats implements Writeable, ToXContent { + + private final static Stats PROTO = new Stats(0, 0, 0, 0); + + private final long ingestCount; + private final long ingestTimeInMillis; + private final long ingestCurrent; + private final long ingestFailedCount; + + public Stats(long ingestCount, long ingestTimeInMillis, long ingestCurrent, long ingestFailedCount) { + this.ingestCount = ingestCount; + this.ingestTimeInMillis = ingestTimeInMillis; + this.ingestCurrent = ingestCurrent; + this.ingestFailedCount = ingestFailedCount; + } + + /** + * @return The total number of executed ingest preprocessing operations. + */ + public long getIngestCount() { + return ingestCount; + } + + /** + * @return The total time spent on ingest preprocessing in millis. + */ + public long getIngestTimeInMillis() { + return ingestTimeInMillis; + } + + /** + * @return The total number of ingest preprocessing operations currently executing. + */ + public long getIngestCurrent() { + return ingestCurrent; + } + + /** + * @return The total number of ingest preprocessing operations that have failed. + */ + public long getIngestFailedCount() { + return ingestFailedCount; + } + + @Override + public Stats readFrom(StreamInput in) throws IOException { + long ingestCount = in.readVLong(); + long ingestTimeInMillis = in.readVLong(); + long ingestCurrent = in.readVLong(); + long ingestFailedCount = in.readVLong(); + return new Stats(ingestCount, ingestTimeInMillis, ingestCurrent, ingestFailedCount); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(ingestCount); + out.writeVLong(ingestTimeInMillis); + out.writeVLong(ingestCurrent); + out.writeVLong(ingestFailedCount); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("count", ingestCount); + builder.timeValueField("time_in_millis", "time", ingestTimeInMillis, TimeUnit.MILLISECONDS); + builder.field("current", ingestCurrent); + builder.field("failed", ingestFailedCount); + return builder; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index 3f0de550782..94c79db30a0 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -19,23 +19,36 @@ package org.elasticsearch.ingest; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import
org.elasticsearch.ingest.core.IngestDocument; import org.elasticsearch.ingest.core.Pipeline; import org.elasticsearch.threadpool.ThreadPool; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import java.util.Optional; +import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Consumer; -public class PipelineExecutionService { +public class PipelineExecutionService implements ClusterStateListener { private final PipelineStore store; private final ThreadPool threadPool; + private final StatsHolder totalStats = new StatsHolder(); + private volatile Map statsHolderPerPipeline = Collections.emptyMap(); + public PipelineExecutionService(PipelineStore store, ThreadPool threadPool) { this.store = store; this.threadPool = threadPool; @@ -89,29 +102,85 @@ public class PipelineExecutionService { }); } - private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception { - String index = indexRequest.index(); - String type = indexRequest.type(); - String id = indexRequest.id(); - String routing = indexRequest.routing(); - String parent = indexRequest.parent(); - String timestamp = indexRequest.timestamp(); - String ttl = indexRequest.ttl() == null ? null : indexRequest.ttl().toString(); - Map sourceAsMap = indexRequest.sourceAsMap(); - IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, timestamp, ttl, sourceAsMap); - pipeline.execute(ingestDocument); + public IngestStats stats() { + Map statsHolderPerPipeline = this.statsHolderPerPipeline; - Map metadataMap = ingestDocument.extractMetadata(); - //it's fine to set all metadata fields all the time, as ingest document holds their starting values - //before ingestion, which might also get modified during ingestion. 
- indexRequest.index(metadataMap.get(IngestDocument.MetaData.INDEX)); - indexRequest.type(metadataMap.get(IngestDocument.MetaData.TYPE)); - indexRequest.id(metadataMap.get(IngestDocument.MetaData.ID)); - indexRequest.routing(metadataMap.get(IngestDocument.MetaData.ROUTING)); - indexRequest.parent(metadataMap.get(IngestDocument.MetaData.PARENT)); - indexRequest.timestamp(metadataMap.get(IngestDocument.MetaData.TIMESTAMP)); - indexRequest.ttl(metadataMap.get(IngestDocument.MetaData.TTL)); - indexRequest.source(ingestDocument.getSourceAndMetadata()); + Map statsPerPipeline = new HashMap<>(statsHolderPerPipeline.size()); + for (Map.Entry entry : statsHolderPerPipeline.entrySet()) { + statsPerPipeline.put(entry.getKey(), entry.getValue().createStats()); + } + + return new IngestStats(totalStats.createStats(), statsPerPipeline); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + IngestMetadata ingestMetadata = event.state().getMetaData().custom(IngestMetadata.TYPE); + if (ingestMetadata != null) { + updatePipelineStats(ingestMetadata); + } + } + + void updatePipelineStats(IngestMetadata ingestMetadata) { + Map newStatsPerPipeline = new HashMap<>(statsHolderPerPipeline); + // removeIf is used here because removing entries from the map while + // iterating over its key set would throw a ConcurrentModificationException + boolean changed = newStatsPerPipeline.keySet().removeIf(pipeline -> ingestMetadata.getPipelines().containsKey(pipeline) == false); + for (String pipeline : ingestMetadata.getPipelines().keySet()) { + if (newStatsPerPipeline.containsKey(pipeline) == false) { + newStatsPerPipeline.put(pipeline, new StatsHolder()); + changed = true; + } + } + + if (changed) { + statsHolderPerPipeline = Collections.unmodifiableMap(newStatsPerPipeline); + } + } + + private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception { + long startTimeInNanos = System.nanoTime(); + // the pipeline specific stat holder may not exist and that is fine + // (e.g. the pipeline may have been removed while we're ingesting a document) + Optional pipelineStats = Optional.ofNullable(statsHolderPerPipeline.get(pipeline.getId())); + try { + totalStats.preIngest(); + pipelineStats.ifPresent(StatsHolder::preIngest); + String index = indexRequest.index(); + String type = indexRequest.type(); + String id = indexRequest.id(); + String routing = indexRequest.routing(); + String parent = indexRequest.parent(); + String timestamp = indexRequest.timestamp(); + String ttl = indexRequest.ttl() == null ? null : indexRequest.ttl().toString(); + Map sourceAsMap = indexRequest.sourceAsMap(); + IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, timestamp, ttl, sourceAsMap); + pipeline.execute(ingestDocument); + + Map metadataMap = ingestDocument.extractMetadata(); + //it's fine to set all metadata fields all the time, as ingest document holds their starting values + //before ingestion, which might also get modified during ingestion.
+ indexRequest.index(metadataMap.get(IngestDocument.MetaData.INDEX)); + indexRequest.type(metadataMap.get(IngestDocument.MetaData.TYPE)); + indexRequest.id(metadataMap.get(IngestDocument.MetaData.ID)); + indexRequest.routing(metadataMap.get(IngestDocument.MetaData.ROUTING)); + indexRequest.parent(metadataMap.get(IngestDocument.MetaData.PARENT)); + indexRequest.timestamp(metadataMap.get(IngestDocument.MetaData.TIMESTAMP)); + indexRequest.ttl(metadataMap.get(IngestDocument.MetaData.TTL)); + indexRequest.source(ingestDocument.getSourceAndMetadata()); + } catch (Exception e) { + totalStats.ingestFailed(); + pipelineStats.ifPresent(StatsHolder::ingestFailed); + throw e; + } finally { + long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos); + totalStats.postIngest(ingestTimeInMillis); + pipelineStats.ifPresent(statsHolder -> statsHolder.postIngest(ingestTimeInMillis)); + } } private Pipeline getPipeline(String pipelineId) { @@ -121,4 +190,30 @@ public class PipelineExecutionService { } return pipeline; } + + static class StatsHolder { + + private final MeanMetric ingestMetric = new MeanMetric(); + private final CounterMetric ingestCurrent = new CounterMetric(); + private final CounterMetric ingestFailed = new CounterMetric(); + + void preIngest() { + ingestCurrent.inc(); + } + + void postIngest(long ingestTimeInMillis) { + ingestCurrent.dec(); + ingestMetric.inc(ingestTimeInMillis); + } + + void ingestFailed() { + ingestFailed.inc(); + } + + IngestStats.Stats createStats() { + return new IngestStats.Stats(ingestMetric.count(), ingestMetric.sum(), ingestCurrent.count(), ingestFailed.count()); + } + + } + } diff --git a/core/src/main/java/org/elasticsearch/node/service/NodeService.java b/core/src/main/java/org/elasticsearch/node/service/NodeService.java index 88b2fe48868..7096b7cc56c 100644 --- a/core/src/main/java/org/elasticsearch/node/service/NodeService.java +++ b/core/src/main/java/org/elasticsearch/node/service/NodeService.java @@ -90,6 +90,7 @@ public class NodeService extends AbstractComponent implements Closeable { this.ingestService = new IngestService(settings, threadPool, processorsRegistryBuilder); this.settingsFilter = settingsFilter; clusterService.add(ingestService.getPipelineStore()); + clusterService.add(ingestService.getPipelineExecutionService()); } // can not use constructor injection or there will be a circular dependency @@ -165,13 +166,14 @@ public class NodeService extends AbstractComponent implements Closeable { httpServer == null ? null : httpServer.stats(), circuitBreakerService.stats(), scriptService.stats(), - discovery.stats() + discovery.stats(), + ingestService.getPipelineExecutionService().stats() ); } public NodeStats stats(CommonStatsFlags indices, boolean os, boolean process, boolean jvm, boolean threadPool, boolean fs, boolean transport, boolean http, boolean circuitBreaker, - boolean script, boolean discoveryStats) { + boolean script, boolean discoveryStats, boolean ingest) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) return new NodeStats(discovery.localNode(), System.currentTimeMillis(), @@ -185,7 +187,8 @@ public class NodeService extends AbstractComponent implements Closeable { http ? (httpServer == null ? null : httpServer.stats()) : null, circuitBreaker ? circuitBreakerService.stats() : null, script ? scriptService.stats() : null, - discoveryStats ? 
discovery.stats() : null + discoveryStats ? discovery.stats() : null, + ingest ? ingestService.getPipelineExecutionService().stats() : null ); } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java index fb8e9c63740..1e2aece1646 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java @@ -81,6 +81,7 @@ public class RestNodesStatsAction extends BaseRestHandler { nodesStatsRequest.breaker(metrics.contains("breaker")); nodesStatsRequest.script(metrics.contains("script")); nodesStatsRequest.discovery(metrics.contains("discovery")); + nodesStatsRequest.ingest(metrics.contains("ingest")); // check for index specific metrics if (metrics.contains("indices")) { @@ -113,6 +114,6 @@ public class RestNodesStatsAction extends BaseRestHandler { nodesStatsRequest.indices().includeSegmentFileSizes(true); } - client.admin().cluster().nodesStats(nodesStatsRequest, new RestToXContentListener(channel)); + client.admin().cluster().nodesStats(nodesStatsRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index a74102f6969..c18a36e0142 100644 --- a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -143,11 +143,11 @@ public class DiskUsageTests extends ESTestCase { }; NodeStats[] nodeStats = new NodeStats[] { new NodeStats(new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, - null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null), + null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null, null), new NodeStats(new DiscoveryNode("node_2", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, - null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null), + null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null, null), new NodeStats(new DiscoveryNode("node_3", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, - null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null) + null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null, null) }; InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvaiableUsages, newMostAvaiableUsages); DiskUsage leastNode_1 = newLeastAvaiableUsages.get("node_1"); @@ -184,11 +184,11 @@ public class DiskUsageTests extends ESTestCase { }; NodeStats[] nodeStats = new NodeStats[] { new NodeStats(new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, - null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null), + null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null, null), new NodeStats(new DiscoveryNode("node_2", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, - null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null), + null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null, null), new NodeStats(new DiscoveryNode("node_3", DummyTransportAddress.INSTANCE, Version.CURRENT), 0, - null,null,null,null,null, new FsInfo(0, node3FSInfo), 
null,null,null,null,null) + null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null, null) }; InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvailableUsages, newMostAvailableUsages); DiskUsage leastNode_1 = newLeastAvailableUsages.get("node_1"); diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java index e644df2a83a..b84ba928be4 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.ingest.core.CompoundProcessor; import org.elasticsearch.ingest.core.IngestDocument; @@ -38,15 +39,16 @@ import org.mockito.ArgumentMatcher; import org.mockito.invocation.InvocationOnMock; import java.util.Collections; +import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.function.BiConsumer; import java.util.function.Consumer; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.eq; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.anyString; import static org.mockito.Matchers.argThat; import static org.mockito.Mockito.doAnswer; @@ -341,6 +343,43 @@ public class PipelineExecutionServiceTests extends ESTestCase { verify(completionHandler, times(1)).accept(null); } + public void testStats() throws Exception { + IngestStats ingestStats = executionService.stats(); + assertThat(ingestStats.getStatsPerPipeline().size(), equalTo(0)); + assertThat(ingestStats.getTotalStats().getIngestCount(), equalTo(0L)); + assertThat(ingestStats.getTotalStats().getIngestCurrent(), equalTo(0L)); + assertThat(ingestStats.getTotalStats().getIngestFailedCount(), equalTo(0L)); + assertThat(ingestStats.getTotalStats().getIngestTimeInMillis(), equalTo(0L)); + + when(store.get("_id1")).thenReturn(new Pipeline("_id1", null, new CompoundProcessor())); + when(store.get("_id2")).thenReturn(new Pipeline("_id2", null, new CompoundProcessor())); + + Map configurationMap = new HashMap<>(); + configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"))); + configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"))); + executionService.updatePipelineStats(new IngestMetadata(configurationMap)); + + Consumer failureHandler = mock(Consumer.class); + Consumer completionHandler = mock(Consumer.class); + + IndexRequest indexRequest = new IndexRequest("_index"); + indexRequest.setPipeline("_id1"); + executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); + ingestStats = executionService.stats(); + assertThat(ingestStats.getStatsPerPipeline().size(), equalTo(2)); + assertThat(ingestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); + assertThat(ingestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(0L)); + assertThat(ingestStats.getTotalStats().getIngestCount(), equalTo(1L)); + + indexRequest.setPipeline("_id2"); + 
executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); + ingestStats = executionService.stats(); + assertThat(ingestStats.getStatsPerPipeline().size(), equalTo(2)); + assertThat(ingestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); + assertThat(ingestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(1L)); + assertThat(ingestStats.getTotalStats().getIngestCount(), equalTo(2L)); + } + private IngestDocument eqID(String index, String type, String id, Map source) { return argThat(new IngestDocumentMatcher(index, type, id, source)); } diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index bcef61d4ef7..8a2f27112b5 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -60,6 +60,9 @@ of `indices`, `os`, `process`, `jvm`, `transport`, `http`, `discovery`:: Statistics about the discovery +`ingest`:: + Statistics about ingest preprocessing + [source,js] -------------------------------------------------- # return indices and os @@ -227,3 +230,23 @@ curl -XGET 'http://localhost:9200/_nodes/stats?pretty&groups=_all' # Some groups from just the indices stats curl -XGET 'http://localhost:9200/_nodes/stats/indices?pretty&groups=foo,bar' -------------------------------------------------- + +[float] +[[ingest-stats]] +=== Ingest statistics + +The `ingest` flag can be set to retrieve statistics that concern ingest: + +`ingest.total.count`:: + The total number of documents ingested during the lifetime of this node. + +`ingest.total.time_in_millis`:: + The total time spent on ingest preprocessing documents during the lifetime of this node. + +`ingest.total.current`:: + The total number of documents currently being ingested. + +`ingest.total.failed`:: + The total number of ingest preprocessing operations that failed during the lifetime of this node. + +On top of these overall ingest statistics, these statistics are also provided on a per-pipeline basis. \ No newline at end of file diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 0827baa6ea1..b314495b34a 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -634,6 +634,9 @@ plugin.mandatory: ingest-attachment,ingest-geoip A node will not start if either of these plugins are not available. +The <> can be used to fetch ingest usage statistics, globally and on a per-pipeline +basis. This is useful for finding out which pipelines are used the most or spend the most time on preprocessing. + [[append-procesesor]] === Append Processor Appends one or more values to an existing array if the field already exists and it is an array.
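As a concrete illustration of the statistics documented above, the following Java sketch fetches and prints them through the client API. It is illustrative only and not part of the patch: the `IngestStatsPrinter` class name is a made-up placeholder, a connected `Client` is assumed, and the sketch relies only on the `NodesStatsRequest.ingest(boolean)` flag and the `NodeStats.getIngestStats()` accessor introduced in this change.

[source,java]
--------------------------------------------------
import java.util.Map;

import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.ingest.IngestStats;

public class IngestStatsPrinter {

    // Prints the total ingest stats and a per-pipeline breakdown for every
    // node that reports an ingest section.
    public static void printIngestStats(Client client) {
        NodesStatsRequest request = new NodesStatsRequest();
        request.ingest(true); // request flag added in this patch
        NodesStatsResponse response = client.admin().cluster().nodesStats(request).actionGet();
        for (NodeStats nodeStats : response.getNodes()) {
            IngestStats ingestStats = nodeStats.getIngestStats();
            if (ingestStats == null) {
                continue; // this node did not report ingest statistics
            }
            IngestStats.Stats total = ingestStats.getTotalStats();
            System.out.println(nodeStats.getNode().getName()
                + " count=" + total.getIngestCount()
                + " failed=" + total.getIngestFailedCount()
                + " current=" + total.getIngestCurrent()
                + " time_in_millis=" + total.getIngestTimeInMillis());
            for (Map.Entry<String, IngestStats.Stats> entry : ingestStats.getStatsPerPipeline().entrySet()) {
                System.out.println("  pipeline [" + entry.getKey() + "] count=" + entry.getValue().getIngestCount());
            }
        }
    }
}
--------------------------------------------------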
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml index b363f018667..3177c678206 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml @@ -68,6 +68,23 @@ setup: - is_false: _source.field1 - is_false: _source.field2 + - do: + cluster.state: {} + # Get master node id + - set: { master_node: master } + + - do: + nodes.stats: + metric: [ ingest ] + - gte: {nodes.$master.ingest.total.count: 1} + - gte: {nodes.$master.ingest.total.failed: 0} + - gte: {nodes.$master.ingest.total.time_in_millis: 0} + - match: {nodes.$master.ingest.total.current: 0} + - match: {nodes.$master.ingest.pipelines.pipeline1.count: 1} + - match: {nodes.$master.ingest.pipelines.pipeline1.failed: 0} + - gte: {nodes.$master.ingest.pipelines.pipeline1.time_in_millis: 0} + - match: {nodes.$master.ingest.pipelines.pipeline1.current: 0} + --- "Test bulk request with default pipeline": @@ -88,6 +105,23 @@ setup: - f1: v2 - gte: { ingest_took: 0 } + - do: + cluster.state: {} + # Get master node id + - set: { master_node: master } + + - do: + nodes.stats: + metric: [ ingest ] + - gte: {nodes.$master.ingest.total.count: 1} + - gte: {nodes.$master.ingest.total.failed: 0} + - gte: {nodes.$master.ingest.total.time_in_millis: 0} + - match: {nodes.$master.ingest.total.current: 0} + - match: {nodes.$master.ingest.pipelines.pipeline2.count: 1} + - match: {nodes.$master.ingest.pipelines.pipeline2.failed: 0} + - gte: {nodes.$master.ingest.pipelines.pipeline2.time_in_millis: 0} + - match: {nodes.$master.ingest.pipelines.pipeline2.current: 0} + - do: get: index: test_index diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index 3e9b0c09cb2..c52c2d8cbf5 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -74,7 +74,7 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService { null, null, null, null, null, fsInfo, null, null, null, - null, null); + null, null, null); } @Inject diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 43483f17117..f8dc889a6b6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1850,7 +1850,7 @@ public final class InternalTestCluster extends TestCluster { } NodeService nodeService = getInstanceFromNode(NodeService.class, nodeAndClient.node); - NodeStats stats = nodeService.stats(CommonStatsFlags.ALL, false, false, false, false, false, false, false, false, false, false); + NodeStats stats = nodeService.stats(CommonStatsFlags.ALL, false, false, false, false, false, false, false, false, false, false, false); assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0L)); assertThat("Query cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0L)); assertThat("FixedBitSet cache size must be 0 on node: " + 
stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0L)); From 4219f8e06273d764ab935537f5a0cdd1dbaccda4 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 10 Mar 2016 14:09:04 +0100 Subject: [PATCH 31/37] Updated Java API docs with version recommendations Closes #15188 --- docs/java-api/client.asciidoc | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/docs/java-api/client.asciidoc b/docs/java-api/client.asciidoc index 7eea693d432..86b9b56cae7 100644 --- a/docs/java-api/client.asciidoc +++ b/docs/java-api/client.asciidoc @@ -12,12 +12,16 @@ Obtaining an elasticsearch `Client` is simple. The most common way to get a client is by creating a <> that connects to a cluster. -*Important:* -______________________________________________________________________________________________________________________________________________________________ -Please note that you are encouraged to use the same version on client -and cluster sides. You may hit some incompatibility issues when mixing -major versions. -______________________________________________________________________________________________________________________________________________________________ +[IMPORTANT] +============================== + +The client must have the same major version (e.g. `2.x` or `5.x`) as the +nodes in the cluster. Clients may connect to clusters which have a different +minor version (e.g. `2.3.x`) but it is possible that new functionality may not +be supported. Ideally, the client should have the same version as the +cluster. + +============================== [[transport-client]] From b8db32b7fdda20c9888e07d129a129504c987401 Mon Sep 17 00:00:00 2001 From: Andrew Cholakian Date: Wed, 2 Dec 2015 18:01:52 -0600 Subject: [PATCH 32/37] Improved transport sniffing docs Closes #15204 --- docs/java-api/client.asciidoc | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/docs/java-api/client.asciidoc b/docs/java-api/client.asciidoc index 86b9b56cae7..7af8c230269 100644 --- a/docs/java-api/client.asciidoc +++ b/docs/java-api/client.asciidoc @@ -57,11 +57,23 @@ Client client = TransportClient.builder().settings(settings).build(); //Add transport addresses and do something with the client... -------------------------------------------------- -The client allows sniffing the rest of the cluster, which adds data nodes -into its list of machines to use. In this case, note that the IP addresses -used will be the ones that the other nodes were started with (the -"publish" address). In order to enable it, set the -`client.transport.sniff` to `true`: +The transport client comes with a cluster sniffing feature which +allows it to dynamically add new hosts and remove old ones. +When sniffing is enabled, the transport client will connect to the nodes in its +internal node list, which is built via calls to `addTransportAddress`. +After this, the client will call the internal cluster state API on those nodes +to discover available data nodes. The internal node list of the client will +be replaced with those data nodes only. This list is refreshed every five seconds by default. +Note that the IP addresses the sniffer connects to are the ones declared as the 'publish' +address in those nodes' elasticsearch config. + +Keep in mind that the list might not include the original node it connected to +if that node is not a data node.
If, for instance, you initially connect to a +master node, after sniffing no further requests will go to that master node, +but rather to any data nodes instead. The reason the transport client excludes non-data +nodes is to avoid sending search traffic to master-only nodes. + +In order to enable sniffing, set `client.transport.sniff` to `true`: [source,java] -------------------------------------------------- From f7a2dbfcaf12a9e6cd6547c909a6cd44fb45fb19 Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Thu, 10 Mar 2016 07:28:13 -0600 Subject: [PATCH 33/37] fixing silly typo in docs --- docs/reference/migration/migrate_2_2.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/migration/migrate_2_2.asciidoc b/docs/reference/migration/migrate_2_2.asciidoc index 9611d86a2ac..8772c1017b0 100644 --- a/docs/reference/migration/migrate_2_2.asciidoc +++ b/docs/reference/migration/migrate_2_2.asciidoc @@ -10,7 +10,7 @@ your application to Elasticsearch 2.2. ==== Geo Point Type The `geo_point` format has been changed to reduce index size and the time required to both index and query -geo point data. To make these performance improvements possible both `doc_values` are `coerce` are required +geo point data. To make these performance improvements possible both `doc_values` and `coerce` are required and therefore cannot be changed. For this reason the `doc_values` and `coerce` parameters have been removed from the <> field mapping. From 046212035ce598300175bec36fad70aafc209f93 Mon Sep 17 00:00:00 2001 From: Robin Clarke Date: Thu, 5 Nov 2015 14:40:06 +0100 Subject: [PATCH 34/37] Clarification about precedence of settings Closes #14559 --- docs/reference/cluster/update-settings.asciidoc | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index 8ec58424730..d0b127cd352 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -83,6 +83,16 @@ Cluster wide settings can be returned using: curl -XGET localhost:9200/_cluster/settings -------------------------------------------------- +[float] +=== Precedence of settings + +Transient cluster settings take precedence over persistent cluster settings, +which take precedence over settings configured in the `elasticsearch.yml` +config file. + +For this reason it is preferable to use the `elasticsearch.yml` file only +for local configurations, and set all cluster-wide settings with the +`settings` API. A list of dynamically updatable settings can be found in the <> documentation. From 2fa573bc581e9b9676c3297dbd117dee83f400f8 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 10 Mar 2016 14:34:05 +0100 Subject: [PATCH 35/37] Missing word in docs --- docs/reference/mapping/params/store.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/params/store.asciidoc b/docs/reference/mapping/params/store.asciidoc index b81208aed77..46d57e9d8b5 100644 --- a/docs/reference/mapping/params/store.asciidoc +++ b/docs/reference/mapping/params/store.asciidoc @@ -1,7 +1,7 @@ [[mapping-store]] === `store` -By default, field values <> to make them searchable, +By default, field values are <> to make them searchable, but they are not _stored_. This means that the field can be queried, but the original field value cannot be retrieved.
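To make the precedence rules above concrete, a minimal Java sketch (not part of the patch; the `SettingsPrecedenceExample` class name and the recovery-speed setting are illustrative assumptions) can set the same dynamic setting both persistently and transiently. The transient value wins until it is removed or the cluster fully restarts, after which the persistent value applies again:

[source,java]
--------------------------------------------------
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

public class SettingsPrecedenceExample {

    // Updates the same dynamic setting persistently and transiently.
    // While both are present the transient value takes precedence,
    // and both take precedence over elasticsearch.yml.
    public static void updateRecoverySpeed(Client client) {
        client.admin().cluster().prepareUpdateSettings()
            .setPersistentSettings(Settings.builder()
                .put("indices.recovery.max_bytes_per_sec", "20mb"))  // survives restarts
            .setTransientSettings(Settings.builder()
                .put("indices.recovery.max_bytes_per_sec", "100mb")) // wins until restart
            .get();
    }
}
--------------------------------------------------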
From 0bbb84c19a0ff2ad974bb2bd8002871e4ee1ac73 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 10 Mar 2016 15:18:08 +0100 Subject: [PATCH 36/37] test: 'Test bulk request with default pipeline' may get run first and then the total ingest count for pipeline1 is 2. --- .../src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml index 3177c678206..63cb42f1035 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/ingest/70_bulk.yaml @@ -80,7 +80,7 @@ setup: - gte: {nodes.$master.ingest.total.failed: 0} - gte: {nodes.$master.ingest.total.time_in_millis: 0} - match: {nodes.$master.ingest.total.current: 0} - - match: {nodes.$master.ingest.pipelines.pipeline1.count: 1} + - gte: {nodes.$master.ingest.pipelines.pipeline1.count: 1} - match: {nodes.$master.ingest.pipelines.pipeline1.failed: 0} - gte: {nodes.$master.ingest.pipelines.pipeline1.time_in_millis: 0} - match: {nodes.$master.ingest.pipelines.pipeline1.current: 0} From 22e716551bb87b4b28da46b949b317c69f9b4a1b Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Thu, 3 Mar 2016 11:44:56 -0700 Subject: [PATCH 37/37] Add -XX:+AlwaysPreTouch JVM flag Enables the touching of all memory pages used by the JVM heap spaces during initialization of the HotSpot VM, which commits all memory pages at initialization time. By default, pages are committed only as they are needed. --- .../main/resources/bin/elasticsearch.in.bat | Bin 3232 -> 3369 bytes .../main/resources/bin/elasticsearch.in.sh | 4 ++++ docs/reference/migration/migrate_5_0.asciidoc | 9 +++++++++ 3 files changed, 13 insertions(+) diff --git a/distribution/src/main/resources/bin/elasticsearch.in.bat b/distribution/src/main/resources/bin/elasticsearch.in.bat index 7138cf5f5cacd970d9b2f183b81cd52572e723fe..b909a464952737b0ca1382961fb54c42d9bf53d2 100644 GIT binary patch delta 119 zcmZ1=xl(Gw7tY|sq?}ZRf}&L2lKj%-jLf`rh5R&y+|=CsqDqB=#Prl+h0@~G6osTp zg_4X^1+Oq)g_P1Fkg|;YlH!8=60RUuUxm!P%#zH+oXo1klFakKG T6Dx}Yic&*BW^aDOxrGS;ZWJt# delta 12 TcmZ1}wLo&i7tYPKTx*yBBZLJK diff --git a/distribution/src/main/resources/bin/elasticsearch.in.sh b/distribution/src/main/resources/bin/elasticsearch.in.sh index f859a06ffab..69d2fc94112 100644 --- a/distribution/src/main/resources/bin/elasticsearch.in.sh +++ b/distribution/src/main/resources/bin/elasticsearch.in.sh @@ -81,6 +81,10 @@ JAVA_OPTS="$JAVA_OPTS -XX:+HeapDumpOnOutOfMemoryError" # Disables explicit GC JAVA_OPTS="$JAVA_OPTS -XX:+DisableExplicitGC" +# Enable pre-touching of memory pages used by the JVM during hotspot +# initialization +JAVA_OPTS="$JAVA_OPTS -XX:+AlwaysPreTouch" + # Ensure UTF-8 encoding by default (e.g. filenames) JAVA_OPTS="$JAVA_OPTS -Dfile.encoding=UTF-8" diff --git a/docs/reference/migration/migrate_5_0.asciidoc b/docs/reference/migration/migrate_5_0.asciidoc index 6fcf566fdcb..37c73698779 100644 --- a/docs/reference/migration/migrate_5_0.asciidoc +++ b/docs/reference/migration/migrate_5_0.asciidoc @@ -808,6 +808,15 @@ changed to now route standard output to the journal and standard error to inherit this setting (these are the defaults for systemd). These settings can be modified by editing the elasticsearch.service file.
+==== Longer startup times + +In Elasticsearch 5.0.0, the `-XX:+AlwaysPreTouch` flag has been added to the JVM +startup options. This option touches all memory pages used by the JVM heap +during initialization of the HotSpot VM to reduce the chance of having to commit +a memory page during GC time. This will increase the startup time of +Elasticsearch as well as the initial resident memory usage of the +Java process. + +[[breaking_50_scripting]] === Scripting
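To see the effect of this change from inside a running JVM, a standalone sketch like the following (illustrative only, not part of the patch) reports whether the flag is active, using the standard JMX runtime bean:

[source,java]
--------------------------------------------------
import java.lang.management.ManagementFactory;

public class PreTouchCheck {

    public static void main(String[] args) {
        // getInputArguments() returns the flags the JVM was started with,
        // so this reports whether -XX:+AlwaysPreTouch is active in this VM.
        boolean enabled = ManagementFactory.getRuntimeMXBean()
            .getInputArguments()
            .contains("-XX:+AlwaysPreTouch");
        System.out.println("-XX:+AlwaysPreTouch enabled: " + enabled);
    }
}
--------------------------------------------------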