diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 5a508fa1065..44687f154ff 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -34,6 +34,10 @@ apply plugin: 'com.github.johnrengelman.shadow' // have the shadow plugin provide the runShadow task apply plugin: 'application' +// Not published so no need to assemble +tasks.remove(assemble) +build.dependsOn.remove('assemble') + archivesBaseName = 'elasticsearch-benchmarks' mainClassName = 'org.openjdk.jmh.Main' diff --git a/build.gradle b/build.gradle index 00d1730a26c..05baaebb276 100644 --- a/build.gradle +++ b/build.gradle @@ -152,10 +152,28 @@ task verifyVersions { } } +/* + * When adding backcompat behavior that spans major versions, temporarily + * disabling the backcompat tests is necessary. This flag controls + * the enabled state of every bwc task. It should be set back to true + * after the backport of the backcompat code is complete. + */ +allprojects { + ext.bwc_tests_enabled = true +} + +task verifyBwcTestsEnabled { + doLast { + if (project.bwc_tests_enabled == false) { + throw new GradleException('Bwc tests are disabled. They must be re-enabled after completing backcompat behavior backporting.') + } + } +} + task branchConsistency { description 'Ensures this branch is internally consistent. For example, that versions constants match released versions.' group 'Verification' - dependsOn verifyVersions + dependsOn verifyVersions, verifyBwcTestsEnabled } subprojects { @@ -402,3 +420,17 @@ task run(type: Run) { group = 'Verification' impliesSubProjects = true } + +/* Remove assemble on all qa projects because we don't need to publish + * artifacts for them. */ +gradle.projectsEvaluated { + subprojects { + if (project.path.startsWith(':qa')) { + Task assemble = project.tasks.findByName('assemble') + if (assemble) { + project.tasks.remove(assemble) + project.build.dependsOn.remove('assemble') + } + } + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index af7716804bf..bafda0afc1b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -394,8 +394,11 @@ class BuildPlugin implements Plugin { project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom t -> // place the pom next to the jar it is for t.destination = new File(project.buildDir, "distributions/${project.archivesBaseName}-${project.version}.pom") - // build poms with assemble - project.assemble.dependsOn(t) + // build poms with assemble (if the assemble task exists) + Task assemble = project.tasks.findByName('assemble') + if (assemble) { + assemble.dependsOn(t) + } } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 66f9f0d4c4e..d2802638ce5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -32,6 +32,9 @@ public class DocsTestPlugin extends RestTestPlugin { public void apply(Project project) { project.pluginManager.apply('elasticsearch.standalone-rest-test') super.apply(project) + // Docs are published separately so no need to assemble + project.tasks.remove(project.assemble) + project.build.dependsOn.remove('assemble') Map defaultSubstitutions = [ /* These match up with the asciidoc 
syntax for substitutions but * the values may differ. In particular {version} needs to resolve diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index f126839a8d4..0395b31786f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -127,6 +127,11 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { */ Set unconvertedCandidates = new HashSet<>() + /** + * The last non-TESTRESPONSE snippet. + */ + Snippet previousTest + /** * Called each time a snippet is encountered. Tracks the snippets and * calls buildTest to actually build the test. @@ -142,6 +147,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { } if (snippet.testSetup) { setup(snippet) + previousTest = snippet return } if (snippet.testResponse) { @@ -150,6 +156,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { } if (snippet.test || snippet.console) { test(snippet) + previousTest = snippet return } // Must be an unmarked snippet.... @@ -158,7 +165,18 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { private void test(Snippet test) { setupCurrent(test) - if (false == test.continued) { + if (test.continued) { + /* Catch some difficult to debug errors with // TEST[continued] + * and throw a helpful error message. */ + if (previousTest == null || previousTest.path != test.path) { + throw new InvalidUserDataException("// TEST[continued] " + + "cannot be on first snippet in a file: $test") + } + if (previousTest != null && previousTest.testSetup) { + throw new InvalidUserDataException("// TEST[continued] " + + "cannot immediately follow // TESTSETUP: $test") + } + } else { current.println('---') current.println("\"line_$test.start\":") /* The Elasticsearch test runner doesn't support the warnings diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 46542708420..1c62e008eed 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -19,6 +19,7 @@ package org.elasticsearch.gradle.test import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.Version import org.gradle.api.InvalidUserDataException import org.gradle.api.Project @@ -143,7 +144,7 @@ class NodeInfo { args.add("${esScript}") } - env = [ 'JAVA_HOME' : project.javaHome ] + env = ['JAVA_HOME': project.javaHome] args.addAll("-E", "node.portsfile=true") String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ") String esJavaOpts = config.jvmArgs.isEmpty() ? 
collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs @@ -158,7 +159,11 @@ class NodeInfo { } } env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options')) - args.addAll("-E", "path.conf=${confDir}") + if (Version.fromString(nodeVersion).major == 5) { + args.addAll("-E", "path.conf=${confDir}") + } else { + args.addAll("--path.conf", "${confDir}") + } if (!System.properties.containsKey("tests.es.path.data")) { args.addAll("-E", "path.data=${-> dataDir.toString()}") } diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 678155c6561..9b86a207af5 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -13,12 +13,14 @@ + + + - @@ -34,7 +36,6 @@ - @@ -144,7 +145,6 @@ - @@ -254,8 +254,6 @@ - - @@ -272,7 +270,6 @@ - @@ -343,7 +340,6 @@ - @@ -378,7 +374,6 @@ - @@ -410,7 +405,6 @@ - @@ -544,7 +538,6 @@ - @@ -571,7 +564,6 @@ - @@ -579,7 +571,6 @@ - @@ -611,7 +602,6 @@ - @@ -679,7 +669,6 @@ - @@ -702,7 +691,6 @@ - @@ -744,13 +732,11 @@ - - @@ -793,4 +779,4 @@ - \ No newline at end of file + diff --git a/buildSrc/version.properties b/buildSrc/version.properties index e7243b9dad9..bec919cf0f6 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,6 +1,6 @@ # When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy elasticsearch = 6.0.0-alpha3 -lucene = 7.0.0-snapshot-a0aef2f +lucene = 7.0.0-snapshot-ad2cb77 # optional dependencies spatial4j = 0.6 @@ -25,7 +25,7 @@ commonscodec = 1.10 hamcrest = 1.3 securemock = 1.2 # When updating mocksocket, please also update core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy -mocksocket = 1.1 +mocksocket = 1.2 # benchmark dependencies jmh = 1.17.3 diff --git a/client/benchmark/build.gradle b/client/benchmark/build.gradle index d8a9105fae9..07186c80270 100644 --- a/client/benchmark/build.gradle +++ b/client/benchmark/build.gradle @@ -37,6 +37,10 @@ apply plugin: 'application' group = 'org.elasticsearch.client' +// Not published so no need to assemble +tasks.remove(assemble) +build.dependsOn.remove('assemble') + archivesBaseName = 'client-benchmarks' mainClassName = 'org.elasticsearch.client.benchmark.BenchmarkMain' diff --git a/client/client-benchmark-noop-api-plugin/build.gradle b/client/client-benchmark-noop-api-plugin/build.gradle index a0d52f15916..bee41034c3c 100644 --- a/client/client-benchmark-noop-api-plugin/build.gradle +++ b/client/client-benchmark-noop-api-plugin/build.gradle @@ -27,9 +27,12 @@ esplugin { classname 'org.elasticsearch.plugin.noop.NoopPlugin' } +// Not published so no need to assemble +tasks.remove(assemble) +build.dependsOn.remove('assemble') + compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" // no unit tests test.enabled = false integTest.enabled = false - diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index a354bdfb7ba..bdf2bf918d0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -49,8 +49,7 @@ import org.elasticsearch.common.xcontent.ContextParser; import org.elasticsearch.common.xcontent.NamedXContentRegistry; 
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder; -import org.elasticsearch.join.aggregations.ParsedChildren; +import org.elasticsearch.plugins.spi.NamedXContentProvider; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.Aggregation; @@ -92,8 +91,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms; import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongTerms; import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; -import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder; -import org.elasticsearch.search.aggregations.matrix.stats.ParsedMatrixStats; import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.avg.ParsedAvg; import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder; @@ -142,11 +139,13 @@ import org.elasticsearch.search.suggest.phrase.PhraseSuggestion; import org.elasticsearch.search.suggest.term.TermSuggestion; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.ServiceLoader; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -180,8 +179,9 @@ public class RestHighLevelClient { */ protected RestHighLevelClient(RestClient restClient, List namedXContentEntries) { this.client = Objects.requireNonNull(restClient); - this.registry = new NamedXContentRegistry(Stream.of(getDefaultNamedXContents().stream(), namedXContentEntries.stream()) - .flatMap(Function.identity()).collect(toList())); + this.registry = new NamedXContentRegistry( + Stream.of(getDefaultNamedXContents().stream(), getProvidedNamedXContents().stream(), namedXContentEntries.stream()) + .flatMap(Function.identity()).collect(toList())); } /** @@ -395,6 +395,10 @@ public class RestHighLevelClient { try { return responseConverter.apply(e.getResponse()); } catch (Exception innerException) { + //the exception is ignored as we now try to parse the response as an error. + //this covers cases like get where 404 can either be a valid document not found response, + //or an error for which parsing is completely different. We try to consider the 404 response as a valid one + //first. If parsing of the response breaks, we fall back to parsing it as an error. 
throw parseResponseException(e); } } @@ -566,8 +570,6 @@ public class RestHighLevelClient { map.put(SignificantLongTerms.NAME, (p, c) -> ParsedSignificantLongTerms.fromXContent(p, (String) c)); map.put(SignificantStringTerms.NAME, (p, c) -> ParsedSignificantStringTerms.fromXContent(p, (String) c)); map.put(ScriptedMetricAggregationBuilder.NAME, (p, c) -> ParsedScriptedMetric.fromXContent(p, (String) c)); - map.put(ChildrenAggregationBuilder.NAME, (p, c) -> ParsedChildren.fromXContent(p, (String) c)); - map.put(MatrixStatsAggregationBuilder.NAME, (p, c) -> ParsedMatrixStats.fromXContent(p, (String) c)); List entries = map.entrySet().stream() .map(entry -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue())) .collect(Collectors.toList()); @@ -579,4 +581,15 @@ public class RestHighLevelClient { (parser, context) -> CompletionSuggestion.fromXContent(parser, (String)context))); return entries; } + + /** + * Loads and returns the {@link NamedXContentRegistry.Entry} parsers provided by plugins. + */ + static List getProvidedNamedXContents() { + List entries = new ArrayList<>(); + for (NamedXContentProvider service : ServiceLoader.load(NamedXContentProvider.class)) { + entries.addAll(service.getNamedXContentParsers()); + } + return entries; + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 7fc0733a7f0..bbc973e2315 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -56,10 +56,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.cbor.CborXContent; import org.elasticsearch.common.xcontent.smile.SmileXContent; +import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -69,6 +71,7 @@ import org.mockito.internal.matchers.VarargMatcher; import java.io.IOException; import java.net.SocketTimeoutException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -613,9 +616,9 @@ public class RestHighLevelClientTests extends ESTestCase { assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage()); } - public void testNamedXContents() { + public void testDefaultNamedXContents() { List namedXContents = RestHighLevelClient.getDefaultNamedXContents(); - assertEquals(45, namedXContents.size()); + assertEquals(43, namedXContents.size()); Map, Integer> categories = new HashMap<>(); for (NamedXContentRegistry.Entry namedXContent : namedXContents) { Integer counter = categories.putIfAbsent(namedXContent.categoryClass, 1); @@ -624,10 +627,28 @@ public class RestHighLevelClientTests extends ESTestCase { } } assertEquals(2, categories.size()); - assertEquals(Integer.valueOf(42), categories.get(Aggregation.class)); + 
assertEquals(Integer.valueOf(40), categories.get(Aggregation.class)); assertEquals(Integer.valueOf(3), categories.get(Suggest.Suggestion.class)); } + public void testProvidedNamedXContents() { + List namedXContents = RestHighLevelClient.getProvidedNamedXContents(); + assertEquals(2, namedXContents.size()); + Map, Integer> categories = new HashMap<>(); + List names = new ArrayList<>(); + for (NamedXContentRegistry.Entry namedXContent : namedXContents) { + names.add(namedXContent.name.getPreferredName()); + Integer counter = categories.putIfAbsent(namedXContent.categoryClass, 1); + if (counter != null) { + categories.put(namedXContent.categoryClass, counter + 1); + } + } + assertEquals(1, categories.size()); + assertEquals(Integer.valueOf(2), categories.get(Aggregation.class)); + assertTrue(names.contains(ChildrenAggregationBuilder.NAME)); + assertTrue(names.contains(MatrixStatsAggregationBuilder.NAME)); + } + private static class TrackingActionListener implements ActionListener { private final AtomicInteger statusCode = new AtomicInteger(-1); private final AtomicReference exception = new AtomicReference<>(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 328f2ee32f5..8dad369cacb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -276,19 +276,20 @@ public class SearchIT extends ESRestHighLevelClientTestCase { } public void testSearchWithParentJoin() throws IOException { + final String indexName = "child_example"; StringEntity parentMapping = new StringEntity("{\n" + " \"mappings\": {\n" + - " \"answer\" : {\n" + - " \"_parent\" : {\n" + - " \"type\" : \"question\"\n" + + " \"qa\" : {\n" + + " \"properties\" : {\n" + + " \"qa_join_field\" : {\n" + + " \"type\" : \"join\",\n" + + " \"relations\" : { \"question\" : \"answer\" }\n" + + " }\n" + " }\n" + " }\n" + - " },\n" + - " \"settings\": {\n" + - " \"index.mapping.single_type\": false" + - " }\n" + + " }" + "}", ContentType.APPLICATION_JSON); - client().performRequest("PUT", "/child_example", Collections.emptyMap(), parentMapping); + client().performRequest("PUT", "/" + indexName, Collections.emptyMap(), parentMapping); StringEntity questionDoc = new StringEntity("{\n" + " \"body\": \"
I have Windows 2003 server and i bought a new Windows 2008 server...\",\n" + " \"title\": \"Whats the best way to file transfer my site from server to a newer one?\",\n" + @@ -296,9 +297,10 @@ public class SearchIT extends ESRestHighLevelClientTestCase { " \"windows-server-2003\",\n" + " \"windows-server-2008\",\n" + " \"file-transfer\"\n" + - " ]\n" + + " ],\n" + + " \"qa_join_field\" : \"question\"\n" + "}", ContentType.APPLICATION_JSON); - client().performRequest("PUT", "/child_example/question/1", Collections.emptyMap(), questionDoc); + client().performRequest("PUT", "/" + indexName + "/qa/1", Collections.emptyMap(), questionDoc); StringEntity answerDoc1 = new StringEntity("{\n" + " \"owner\": {\n" + " \"location\": \"Norfolk, United Kingdom\",\n" + @@ -306,9 +308,13 @@ public class SearchIT extends ESRestHighLevelClientTestCase { " \"id\": 48\n" + " },\n" + " \"body\": \"
Unfortunately you're pretty much limited to FTP...\",\n" + + " \"qa_join_field\" : {\n" + + " \"name\" : \"answer\",\n" + + " \"parent\" : \"1\"\n" + + " },\n" + " \"creation_date\": \"2009-05-04T13:45:37.030\"\n" + "}", ContentType.APPLICATION_JSON); - client().performRequest("PUT", "child_example/answer/1", Collections.singletonMap("parent", "1"), answerDoc1); + client().performRequest("PUT", "/" + indexName + "/qa/2", Collections.singletonMap("routing", "1"), answerDoc1); StringEntity answerDoc2 = new StringEntity("{\n" + " \"owner\": {\n" + " \"location\": \"Norfolk, United Kingdom\",\n" + @@ -316,9 +322,13 @@ public class SearchIT extends ESRestHighLevelClientTestCase { " \"id\": 49\n" + " },\n" + " \"body\": \"
Use Linux...\",\n" + + " \"qa_join_field\" : {\n" + + " \"name\" : \"answer\",\n" + + " \"parent\" : \"1\"\n" + + " },\n" + " \"creation_date\": \"2009-05-05T13:45:37.030\"\n" + "}", ContentType.APPLICATION_JSON); - client().performRequest("PUT", "/child_example/answer/2", Collections.singletonMap("parent", "1"), answerDoc2); + client().performRequest("PUT", "/" + indexName + "/qa/3", Collections.singletonMap("routing", "1"), answerDoc2); client().performRequest("POST", "/_refresh"); TermsAggregationBuilder leafTermAgg = new TermsAggregationBuilder("top-names", ValueType.STRING) @@ -328,7 +338,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase { .size(10).subAggregation(childrenAgg); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.size(0).aggregation(termsAgg); - SearchRequest searchRequest = new SearchRequest("child_example"); + SearchRequest searchRequest = new SearchRequest(indexName); searchRequest.source(searchSourceBuilder); SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index ba3a07454ee..cc0f1b30896 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -553,7 +553,7 @@ public class RestClient implements Closeable { return httpRequest; } - private static URI buildUri(String pathPrefix, String path, Map params) { + static URI buildUri(String pathPrefix, String path, Map params) { Objects.requireNonNull(path, "path must not be null"); try { String fullPath; diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index d8c297ed099..6978aab58fe 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -23,6 +23,9 @@ import org.apache.http.Header; import org.apache.http.HttpHost; import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; +import java.net.URI; +import java.util.Collections; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; @@ -77,6 +80,22 @@ public class RestClientTests extends RestClientTestCase { } } + public void testBuildUriLeavesPathUntouched() { + { + URI uri = RestClient.buildUri("/foo$bar", "/index/type/id", Collections.emptyMap()); + assertEquals("/foo$bar/index/type/id", uri.getPath()); + } + { + URI uri = RestClient.buildUri(null, "/foo$bar/ty/pe/i/d", Collections.emptyMap()); + assertEquals("/foo$bar/ty/pe/i/d", uri.getPath()); + } + { + URI uri = RestClient.buildUri(null, "/index/type/id", Collections.singletonMap("foo$bar", "x/y/z")); + assertEquals("/index/type/id", uri.getPath()); + assertEquals("foo$bar=x/y/z", uri.getQuery()); + } + } + private static RestClient createRestClient() { HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)}; return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000), new Header[]{}, hosts, null, null); diff --git a/core/licenses/lucene-analyzers-common-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-analyzers-common-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 9a1f65be58f..00000000000 --- 
a/core/licenses/lucene-analyzers-common-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e191674c50c9d99c9838da52cbf67c411998f4e \ No newline at end of file diff --git a/core/licenses/lucene-analyzers-common-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-analyzers-common-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..6d1791e8e41 --- /dev/null +++ b/core/licenses/lucene-analyzers-common-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +00d3260223eac0405a82eeeb8439de0e5eb5f888 \ No newline at end of file diff --git a/core/licenses/lucene-backward-codecs-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-backward-codecs-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 8ffb313c694..00000000000 --- a/core/licenses/lucene-backward-codecs-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -45bc34ab640d5d1a7491b523631b902f20db5384 \ No newline at end of file diff --git a/core/licenses/lucene-backward-codecs-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-backward-codecs-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..d20323f264c --- /dev/null +++ b/core/licenses/lucene-backward-codecs-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +3a698989219afd9150738899bc849075c102881b \ No newline at end of file diff --git a/core/licenses/lucene-core-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-core-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 220b0ea5212..00000000000 --- a/core/licenses/lucene-core-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b44d86e9077443c3ba4918a85603734461c6b448 \ No newline at end of file diff --git a/core/licenses/lucene-core-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-core-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..fbb658a70cf --- /dev/null +++ b/core/licenses/lucene-core-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +bb636d31949418943454dbe2d72b9b66cd743f9f \ No newline at end of file diff --git a/core/licenses/lucene-grouping-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-grouping-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 99612cc3409..00000000000 --- a/core/licenses/lucene-grouping-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -409b616d40e2041a02890b2dc477ed845e3121e9 \ No newline at end of file diff --git a/core/licenses/lucene-grouping-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-grouping-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..dac32fca814 --- /dev/null +++ b/core/licenses/lucene-grouping-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +720252d786273edcc48b2ae7b380bc229fe8930c \ No newline at end of file diff --git a/core/licenses/lucene-highlighter-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-highlighter-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index a3bd96546f3..00000000000 --- a/core/licenses/lucene-highlighter-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cfac105541315e2ca54955f681b410a7aa3bbb9d \ No newline at end of file diff --git a/core/licenses/lucene-highlighter-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-highlighter-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..83753439b07 --- /dev/null +++ b/core/licenses/lucene-highlighter-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +735178c26f3eb361c30657beeec9e57bd5548d58 \ No newline at end of file diff --git a/core/licenses/lucene-join-7.0.0-snapshot-a0aef2f.jar.sha1 
b/core/licenses/lucene-join-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 92c0c80f6a4..00000000000 --- a/core/licenses/lucene-join-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -993c1331130dd26c632b964fd8caac259bb9f3fc \ No newline at end of file diff --git a/core/licenses/lucene-join-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-join-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..68c8dbc5491 --- /dev/null +++ b/core/licenses/lucene-join-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +de5e5cd9b00be4d005d0e51c74084be6c07b0bbd \ No newline at end of file diff --git a/core/licenses/lucene-memory-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-memory-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 6de623ae884..00000000000 --- a/core/licenses/lucene-memory-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec1460a28850410112a6349a7fff27df31242295 \ No newline at end of file diff --git a/core/licenses/lucene-memory-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-memory-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..61fcd225c25 --- /dev/null +++ b/core/licenses/lucene-memory-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +796ca5e5a9af3cc21f50156fa7e614338ec15ceb \ No newline at end of file diff --git a/core/licenses/lucene-misc-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-misc-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index fd7a6b53d34..00000000000 --- a/core/licenses/lucene-misc-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -57d342dbe68cf05361ccfda6bb76f2410cac900b \ No newline at end of file diff --git a/core/licenses/lucene-misc-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-misc-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..df88e725a25 --- /dev/null +++ b/core/licenses/lucene-misc-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +7ba802083c4c97a07d9487c2b26ee39e4f8e3c7e \ No newline at end of file diff --git a/core/licenses/lucene-queries-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-queries-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index e04c283d0fa..00000000000 --- a/core/licenses/lucene-queries-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ed10847b6a2353ac66decd5a2ee1a1d34353049 \ No newline at end of file diff --git a/core/licenses/lucene-queries-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-queries-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..55de2e60d3b --- /dev/null +++ b/core/licenses/lucene-queries-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +d66adfdb3f330b726420db5f8db21b17a0d9991d \ No newline at end of file diff --git a/core/licenses/lucene-queryparser-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-queryparser-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 87871dc29d5..00000000000 --- a/core/licenses/lucene-queryparser-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -23ce6c2ea59287d8fe4fe31f466e9a58a1efe7b5 \ No newline at end of file diff --git a/core/licenses/lucene-queryparser-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-queryparser-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..f77ac55a05d --- /dev/null +++ b/core/licenses/lucene-queryparser-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +569c6362cb87858fc282fd786ba0fda0c44f0a8b \ No newline at end of file diff --git a/core/licenses/lucene-sandbox-7.0.0-snapshot-a0aef2f.jar.sha1 
b/core/licenses/lucene-sandbox-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index ea065b272cf..00000000000 --- a/core/licenses/lucene-sandbox-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78bda71c8e65428927136f81112a031aa9cd04d4 \ No newline at end of file diff --git a/core/licenses/lucene-sandbox-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-sandbox-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..b242c766271 --- /dev/null +++ b/core/licenses/lucene-sandbox-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +0ba62e91082910b1057027b8912395da670105d0 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-spatial-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index c623088ce2a..00000000000 --- a/core/licenses/lucene-spatial-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1e7ea95e6197176015b13551c7496be4867ede45 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-spatial-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..124c8c5a3d6 --- /dev/null +++ b/core/licenses/lucene-spatial-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +968e678dc4a236bbc8e4c2eb66f5702ea48aae10 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-extras-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-spatial-extras-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index e51de2208ee..00000000000 --- a/core/licenses/lucene-spatial-extras-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ae4ecd6c478456395ae9a3f954b8afc13629bb9 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-extras-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-spatial-extras-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..6ebe92a6ab6 --- /dev/null +++ b/core/licenses/lucene-spatial-extras-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +579670cc27104fdbd627959b7982a99eab1d16d1 \ No newline at end of file diff --git a/core/licenses/lucene-spatial3d-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-spatial3d-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 25d042e923a..00000000000 --- a/core/licenses/lucene-spatial3d-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d5d1a81fc290b9660a49557f848dc2a3c4f2048b \ No newline at end of file diff --git a/core/licenses/lucene-spatial3d-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-spatial3d-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..cf9ba1ac90a --- /dev/null +++ b/core/licenses/lucene-spatial3d-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +53f3fc06ed3357dc75d7b050172520aa86d41010 \ No newline at end of file diff --git a/core/licenses/lucene-suggest-7.0.0-snapshot-a0aef2f.jar.sha1 b/core/licenses/lucene-suggest-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 5ac114c4547..00000000000 --- a/core/licenses/lucene-suggest-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d77cdd8f2782062a3b4c319c64f0fa4d804aafed \ No newline at end of file diff --git a/core/licenses/lucene-suggest-7.0.0-snapshot-ad2cb77.jar.sha1 b/core/licenses/lucene-suggest-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..dcab47e967f --- /dev/null +++ b/core/licenses/lucene-suggest-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +5281aa095f4f46580ea2008ffd040733096d0246 \ No newline at end of file diff --git 
a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 288a52a0a1f..d4de9255562 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -78,6 +78,10 @@ public class Version implements Comparable { public static final Version V_5_4_0 = new Version(V_5_4_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_0); public static final int V_5_4_1_ID = 5040199; public static final Version V_5_4_1 = new Version(V_5_4_1_ID, org.apache.lucene.util.Version.LUCENE_6_5_1); + public static final int V_5_4_2_ID = 5040299; + public static final Version V_5_4_2 = new Version(V_5_4_2_ID, org.apache.lucene.util.Version.LUCENE_6_5_1); + public static final int V_5_4_3_ID = 5040399; + public static final Version V_5_4_3 = new Version(V_5_4_3_ID, org.apache.lucene.util.Version.LUCENE_6_5_1); public static final int V_5_5_0_ID = 5050099; public static final Version V_5_5_0 = new Version(V_5_5_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0); public static final int V_5_6_0_ID = 5060099; @@ -116,6 +120,10 @@ public class Version implements Comparable { return V_5_6_0; case V_5_5_0_ID: return V_5_5_0; + case V_5_4_3_ID: + return V_5_4_3; + case V_5_4_2_ID: + return V_5_4_2; case V_5_4_1_ID: return V_5_4_1; case V_5_4_0_ID: diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 64f63025279..6b1cf09bd73 100644 --- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -43,8 +43,6 @@ import java.net.URLEncoder; import java.util.Locale; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken; /** * A base class for the response of a write operation that involves a single doc @@ -351,17 +349,15 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr context.setSeqNo(parser.longValue()); } else if (_PRIMARY_TERM.equals(currentFieldName)) { context.setPrimaryTerm(parser.longValue()); - } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { if (_SHARDS.equals(currentFieldName)) { context.setShardInfo(ShardInfo.fromXContent(parser)); } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); // skip potential inner objects for forward compatibility } - } else { - throwUnknownToken(token, parser.getTokenLocation()); + } else if (token == XContentParser.Token.START_ARRAY) { + parser.skipChildren(); // skip potential inner arrays for forward compatibility } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index e90f9e578ce..32c9493b440 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -37,6 +37,7 @@ public class GetSnapshotsRequest extends MasterNodeRequest public static final String ALL_SNAPSHOTS = "_all"; public static final String CURRENT_SNAPSHOT = 
"_current"; + public static final boolean DEFAULT_VERBOSE_MODE = true; private String repository; @@ -44,7 +45,7 @@ public class GetSnapshotsRequest extends MasterNodeRequest private boolean ignoreUnavailable; - private boolean verbose = true; + private boolean verbose = DEFAULT_VERBOSE_MODE; public GetSnapshotsRequest() { } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 11566378085..b7da50139bb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -49,6 +49,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.CharFilterFactory; import org.elasticsearch.index.analysis.CustomAnalyzer; +import org.elasticsearch.index.analysis.CustomAnalyzerProvider; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.TokenFilterFactory; @@ -183,13 +184,14 @@ public class TransportAnalyzeAction extends TransportSingleShardAction tokenizerFactory = parseTokenizerFactory(request, indexAnalyzers, analysisRegistry, environment); - TokenFilterFactory[] tokenFilterFactories = new TokenFilterFactory[0]; - tokenFilterFactories = getTokenFilterFactories(request, indexSettings, analysisRegistry, environment, tokenFilterFactories); + List charFilterFactoryList = parseCharFilterFactories(request, indexSettings, analysisRegistry, environment); - CharFilterFactory[] charFilterFactories = new CharFilterFactory[0]; - charFilterFactories = getCharFilterFactories(request, indexSettings, analysisRegistry, environment, charFilterFactories); + List tokenFilterFactoryList = parseTokenFilterFactories(request, indexSettings, analysisRegistry, + environment, tokenizerFactory, charFilterFactoryList); - analyzer = new CustomAnalyzer(tokenizerFactory.v1(), tokenizerFactory.v2(), charFilterFactories, tokenFilterFactories); + analyzer = new CustomAnalyzer(tokenizerFactory.v1(), tokenizerFactory.v2(), + charFilterFactoryList.toArray(new CharFilterFactory[charFilterFactoryList.size()]), + tokenFilterFactoryList.toArray(new TokenFilterFactory[tokenFilterFactoryList.size()])); closeAnalyzer = true; } else if (analyzer == null) { if (indexAnalyzers == null) { @@ -462,12 +464,13 @@ public class TransportAnalyzeAction extends TransportSingleShardAction parseCharFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry, + Environment environment) throws IOException { + List charFilterFactoryList = new ArrayList<>(); if (request.charFilters() != null && request.charFilters().size() > 0) { - charFilterFactories = new CharFilterFactory[request.charFilters().size()]; - for (int i = 0; i < request.charFilters().size(); i++) { - final AnalyzeRequest.NameOrDefinition charFilter = request.charFilters().get(i); + List charFilters = request.charFilters(); + for (AnalyzeRequest.NameOrDefinition charFilter : charFilters) { + CharFilterFactory charFilterFactory; // parse anonymous settings if (charFilter.definition != null) { Settings settings = getAnonymousSettings(charFilter.definition); @@ -481,7 +484,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction charFilterFactoryFactory; if 
(indexSettings == null) { @@ -489,31 +492,34 @@ public class TransportAnalyzeAction extends TransportSingleShardAction parseTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry, + Environment environment, Tuple tokenizerFactory, + List charFilterFactoryList) throws IOException { + List tokenFilterFactoryList = new ArrayList<>(); if (request.tokenFilters() != null && request.tokenFilters().size() > 0) { - tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().size()]; - for (int i = 0; i < request.tokenFilters().size(); i++) { - final AnalyzeRequest.NameOrDefinition tokenFilter = request.tokenFilters().get(i); + List tokenFilters = request.tokenFilters(); + for (AnalyzeRequest.NameOrDefinition tokenFilter : tokenFilters) { + TokenFilterFactory tokenFilterFactory; // parse anonymous settings if (tokenFilter.definition != null) { Settings settings = getAnonymousSettings(tokenFilter.definition); @@ -527,7 +533,11 @@ public class TransportAnalyzeAction extends TransportSingleShardAction tokenFilterFactoryFactory; if (indexSettings == null) { @@ -535,23 +545,26 @@ public class TransportAnalyzeAction extends TransportSingleShardAction parseTokenizerFactory(AnalyzeRequest request, IndexAnalyzers indexAnalzyers, diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java index f1d7d38f6ac..249d22e7c5b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -38,7 +38,7 @@ public class DeleteIndexRequest extends AcknowledgedRequest private String[] indices; // Delete index should work by default on both open and closed indices. 
- private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true); + private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true); public DeleteIndexRequest() { } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateResponse.java index 5c2a2b166bc..9519f0f9fcf 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateResponse.java @@ -32,7 +32,7 @@ public class DeleteIndexTemplateResponse extends AcknowledgedResponse { DeleteIndexTemplateResponse() { } - DeleteIndexTemplateResponse(boolean acknowledged) { + protected DeleteIndexTemplateResponse(boolean acknowledged) { super(acknowledged); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResultHolder.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResultHolder.java index e844f8d6506..3e7ee41b914 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResultHolder.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResultHolder.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.VersionConflictEngineException; /** * A struct-like holder for a bulk items reponse, result, and the resulting @@ -39,4 +40,9 @@ class BulkItemResultHolder { this.operationResult = operationResult; this.replicaRequest = replicaRequest; } + + public boolean isVersionConflict() { + return operationResult == null ? false : + operationResult.getFailure() instanceof VersionConflictEngineException; + } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java b/core/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java index 812653d5826..7f16b7c4d6d 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java @@ -27,13 +27,13 @@ public interface MappingUpdatePerformer { /** * Update the mappings on the master. */ - void updateMappings(Mapping update, ShardId shardId, String type) throws Exception; + void updateMappings(Mapping update, ShardId shardId, String type); /** * Throws a {@code ReplicationOperation.RetryOnPrimaryException} if the operation needs to be * retried on the primary due to the mappings not being present yet, or a different exception if * updating the mappings on the master failed. 
*/ - void verifyMappings(Mapping update, ShardId shardId) throws Exception; + void verifyMappings(Mapping update, ShardId shardId); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 7a2c5eb0222..9df64699b98 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -33,6 +33,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; +import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.action.update.UpdateRequest; @@ -50,7 +51,6 @@ import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.get.GetResult; @@ -66,7 +66,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; -import java.io.IOException; import java.util.Map; import java.util.function.LongSupplier; @@ -265,131 +264,150 @@ public class TransportShardBulkAction extends TransportWriteAction 0)) { + final BytesReference indexSourceAsBytes = updateIndexRequest.source(); + final Tuple> sourceAndContent = + XContentHelper.convertToMap(indexSourceAsBytes, true, updateIndexRequest.getContentType()); + updateResponse.setGetResult(UpdateHelper.extractGetResult(updateRequest, concreteIndex, + indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); + } + // set translated request as replica request + replicaRequest = new BulkItemRequest(bulkReqId, updateIndexRequest); + + } else if (opType == Engine.Operation.TYPE.DELETE) { + assert result instanceof Engine.DeleteResult : result.getClass(); + final DeleteRequest updateDeleteRequest = translate.action(); + + final DeleteResponse deleteResponse = new DeleteResponse(primary.shardId(), updateDeleteRequest.type(), updateDeleteRequest.id(), + result.getSeqNo(), primary.getPrimaryTerm(), result.getVersion(), ((Engine.DeleteResult) result).isFound()); + + updateResponse = new UpdateResponse(deleteResponse.getShardInfo(), deleteResponse.getShardId(), + deleteResponse.getType(), deleteResponse.getId(), deleteResponse.getSeqNo(), deleteResponse.getPrimaryTerm(), + deleteResponse.getVersion(), deleteResponse.getResult()); + + final GetResult getResult = UpdateHelper.extractGetResult(updateRequest, concreteIndex, deleteResponse.getVersion(), + translate.updatedSourceAsMap(), translate.updateSourceContentType(), null); + + updateResponse.setGetResult(getResult); + // set translated request as replica request + replicaRequest = new BulkItemRequest(bulkReqId, updateDeleteRequest); + + } else { + throw new IllegalArgumentException("unknown operation type: " + opType); + } + + 
return new BulkItemResultHolder(updateResponse, result, replicaRequest); + } + + /** + * Executes update request once, delegating to a index or delete operation after translation. + * NOOP updates are indicated by returning a null operation in {@link BulkItemResultHolder} + */ + static BulkItemResultHolder executeUpdateRequestOnce(UpdateRequest updateRequest, IndexShard primary, + IndexMetaData metaData, String concreteIndex, + UpdateHelper updateHelper, LongSupplier nowInMillis, + BulkItemRequest primaryItemRequest, int bulkReqId, + final MappingUpdatePerformer mappingUpdater) throws Exception { + final UpdateHelper.Result translate; + // translate update request + try { + translate = updateHelper.prepare(updateRequest, primary, nowInMillis); + } catch (Exception failure) { + // we may fail translating a update to index or delete operation + // we use index result to communicate failure while translating update request + final Engine.Result result = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO); + return new BulkItemResultHolder(null, result, primaryItemRequest); + } + + final Engine.Result result; + // execute translated update request + switch (translate.getResponseResult()) { + case CREATED: + case UPDATED: + IndexRequest indexRequest = translate.action(); + MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); + indexRequest.process(mappingMd, concreteIndex); + result = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdater); + break; + case DELETED: + DeleteRequest deleteRequest = translate.action(); + result = executeDeleteRequestOnPrimary(deleteRequest, primary, mappingUpdater); + break; + case NOOP: + primary.noopUpdate(updateRequest.type()); + result = null; + break; + default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); + } + + if (result == null) { + // this is a noop operation + final UpdateResponse updateResponse = translate.action(); + return new BulkItemResultHolder(updateResponse, result, primaryItemRequest); + } else if (result.hasFailure()) { + // There was a result, and the result was a failure + return new BulkItemResultHolder(null, result, primaryItemRequest); + } else { + // It was successful, we need to construct the response and return it + return processUpdateResponse(updateRequest, concreteIndex, result, translate, primary, bulkReqId); + } + } + /** * Executes update request, delegating to a index or delete operation after translation, * handles retries on version conflict and constructs update response - * NOTE: reassigns bulk item request at requestIndex for replicas to - * execute translated update request (NOOP update is an exception). 
NOOP updates are - * indicated by returning a null operation in {@link BulkItemResultHolder} - * */ + * NOOP updates are indicated by returning a null operation + * in {@link BulkItemResultHolder} + */ private static BulkItemResultHolder executeUpdateRequest(UpdateRequest updateRequest, IndexShard primary, IndexMetaData metaData, BulkShardRequest request, int requestIndex, UpdateHelper updateHelper, LongSupplier nowInMillis, final MappingUpdatePerformer mappingUpdater) throws Exception { - Engine.Result result = null; - UpdateResponse updateResponse = null; - BulkItemRequest replicaRequest = request.items()[requestIndex]; - int maxAttempts = updateRequest.retryOnConflict(); - for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) { - final UpdateHelper.Result translate; - // translate update request - try { - translate = updateHelper.prepare(updateRequest, primary, nowInMillis); - } catch (Exception failure) { - // we may fail translating a update to index or delete operation - // we use index result to communicate failure while translating update request - result = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO); - break; // out of retry loop - } - // execute translated update request - switch (translate.getResponseResult()) { - case CREATED: - case UPDATED: - IndexRequest indexRequest = translate.action(); - MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); - indexRequest.process(mappingMd, request.index()); - result = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdater); - break; - case DELETED: - DeleteRequest deleteRequest = translate.action(); - result = executeDeleteRequestOnPrimary(deleteRequest, primary, mappingUpdater); - break; - case NOOP: - primary.noopUpdate(updateRequest.type()); - break; - default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); - } - if (result == null) { - // this is a noop operation - updateResponse = translate.action(); - break; // out of retry loop - } else if (result.hasFailure() == false) { - // enrich update response and - // set translated update (index/delete) request for replica execution in bulk items - switch (result.getOperationType()) { - case INDEX: - assert result instanceof Engine.IndexResult : result.getClass(); - IndexRequest updateIndexRequest = translate.action(); - final IndexResponse indexResponse = new IndexResponse( - primary.shardId(), - updateIndexRequest.type(), - updateIndexRequest.id(), - result.getSeqNo(), - primary.getPrimaryTerm(), - result.getVersion(), - ((Engine.IndexResult) result).isCreated()); - BytesReference indexSourceAsBytes = updateIndexRequest.source(); - updateResponse = new UpdateResponse( - indexResponse.getShardInfo(), - indexResponse.getShardId(), - indexResponse.getType(), - indexResponse.getId(), - indexResponse.getSeqNo(), - indexResponse.getPrimaryTerm(), - indexResponse.getVersion(), - indexResponse.getResult()); - if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) || - (updateRequest.fields() != null && updateRequest.fields().length > 0)) { - Tuple> sourceAndContent = - XContentHelper.convertToMap(indexSourceAsBytes, true, updateIndexRequest.getContentType()); - updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), - indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); - } - // set translated request as replica request - replicaRequest 
= new BulkItemRequest(request.items()[requestIndex].id(), updateIndexRequest); - break; - case DELETE: - assert result instanceof Engine.DeleteResult : result.getClass(); - DeleteRequest updateDeleteRequest = translate.action(); - DeleteResponse deleteResponse = new DeleteResponse( - primary.shardId(), - updateDeleteRequest.type(), - updateDeleteRequest.id(), - result.getSeqNo(), - primary.getPrimaryTerm(), - result.getVersion(), - ((Engine.DeleteResult) result).isFound()); - updateResponse = new UpdateResponse( - deleteResponse.getShardInfo(), - deleteResponse.getShardId(), - deleteResponse.getType(), - deleteResponse.getId(), - deleteResponse.getSeqNo(), - deleteResponse.getPrimaryTerm(), - deleteResponse.getVersion(), - deleteResponse.getResult()); - final GetResult getResult = updateHelper.extractGetResult( - updateRequest, - request.index(), - deleteResponse.getVersion(), - translate.updatedSourceAsMap(), - translate.updateSourceContentType(), - null); - updateResponse.setGetResult(getResult); - // set translated request as replica request - replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateDeleteRequest); - break; - } - assert result.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO; - // successful operation - break; // out of retry loop - } else if (result.getFailure() instanceof VersionConflictEngineException == false) { - // not a version conflict exception - break; // out of retry loop + BulkItemRequest primaryItemRequest = request.items()[requestIndex]; + assert primaryItemRequest.request() == updateRequest + : "expected bulk item request to contain the original update request, got: " + + primaryItemRequest.request() + " and " + updateRequest; + + BulkItemResultHolder holder = null; + // There must be at least one attempt + int maxAttempts = Math.max(1, updateRequest.retryOnConflict()); + for (int attemptCount = 0; attemptCount < maxAttempts; attemptCount++) { + + holder = executeUpdateRequestOnce(updateRequest, primary, metaData, request.index(), updateHelper, + nowInMillis, primaryItemRequest, request.items()[requestIndex].id(), mappingUpdater); + + // It was either a successful request, or it was a non-conflict failure + if (holder.isVersionConflict() == false) { + return holder; } } - return new BulkItemResultHolder(updateResponse, result, replicaRequest); + // We ran out of tries and haven't returned a valid bulk item response, so return the last one generated + return holder; } /** Modes for executing item request on replica depending on corresponding primary execution result */ @@ -455,20 +473,7 @@ public class TransportShardBulkAction extends TransportWriteAction { + throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(), + "Mappings are not available on the replica yet, triggered update: " + update); + }); + case DELETE: + DeleteRequest deleteRequest = (DeleteRequest) docWriteRequest; + return replica.applyDeleteOperationOnReplica(primaryResponse.getSeqNo(), primaryTerm, primaryResponse.getVersion(), + deleteRequest.type(), deleteRequest.id(), deleteRequest.versionType().versionTypeForReplicationAndRecovery(), + update -> { + throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(), + "Mappings are not available on the replica yet, triggered update: " + update); + }); + default: + throw new IllegalStateException("Unexpected request operation type on replica: " + + docWriteRequest.opType().getLowercase()); } - return location; - } - - private static Translog.Location 
locationToSync(Translog.Location current, - Translog.Location next) { - /* here we are moving forward in the translog with each operation. Under the hood this might - * cross translog files which is ok since from the user perspective the translog is like a - * tape where only the highest location needs to be fsynced in order to sync all previous - * locations even though they are not in the same file. When the translog rolls over files - * the previous file is fsynced on after closing if needed.*/ - assert next != null : "next operation can't be null"; - assert current == null || current.compareTo(next) < 0 : - "translog locations are not increasing"; - return next; - } - - /** - * Execute the given {@link IndexRequest} on a replica shard, throwing a - * {@link RetryOnReplicaException} if the operation needs to be re-tried. - */ - private static Engine.IndexResult executeIndexRequestOnReplica(DocWriteResponse primaryResponse, IndexRequest request, - long primaryTerm, IndexShard replica) throws IOException { - - final Engine.Index operation; - try { - operation = prepareIndexOperationOnReplica(primaryResponse, request, primaryTerm, replica); - } catch (MapperParsingException e) { - return new Engine.IndexResult(e, primaryResponse.getVersion(), primaryResponse.getSeqNo()); - } - - Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - final ShardId shardId = replica.shardId(); - throw new RetryOnReplicaException(shardId, - "Mappings are not available on the replica yet, triggered update: " + update); - } - return replica.index(operation); - } - - /** Utility method to prepare an index operation on replica shards */ - static Engine.Index prepareIndexOperationOnReplica( - DocWriteResponse primaryResponse, - IndexRequest request, - long primaryTerm, - IndexShard replica) { - - final ShardId shardId = replica.shardId(); - final long version = primaryResponse.getVersion(); - final long seqNo = primaryResponse.getSeqNo(); - final SourceToParse sourceToParse = - SourceToParse.source(shardId.getIndexName(), - request.type(), request.id(), request.source(), request.getContentType()) - .routing(request.routing()).parent(request.parent()); - final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery(); - assert versionType.validateVersionForWrites(version); - - return replica.prepareIndexOnReplica(sourceToParse, seqNo, primaryTerm, version, versionType, - request.getAutoGeneratedTimestamp(), request.isRetry()); - } - - /** Utility method to prepare an index operation on primary shards */ - private static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) { - final SourceToParse sourceToParse = - SourceToParse.source(request.index(), request.type(), - request.id(), request.source(), request.getContentType()) - .routing(request.routing()).parent(request.parent()); - return primary.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), - request.getAutoGeneratedTimestamp(), request.isRetry()); } /** Executes index operation on primary shard after updates mapping if dynamic mappings are found */ static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary, MappingUpdatePerformer mappingUpdater) throws Exception { - // Update the mappings if parsing the documents includes new dynamic updates - final Engine.Index preUpdateOperation; - final Mapping mappingUpdate; - final boolean mappingUpdateNeeded; + final SourceToParse sourceToParse = + 
SourceToParse.source(request.index(), request.type(), request.id(), request.source(), request.getContentType()) + .routing(request.routing()).parent(request.parent()); try { - preUpdateOperation = prepareIndexOperationOnPrimary(request, primary); - mappingUpdate = preUpdateOperation.parsedDoc().dynamicMappingsUpdate(); - mappingUpdateNeeded = mappingUpdate != null; - if (mappingUpdateNeeded) { - mappingUpdater.updateMappings(mappingUpdate, primary.shardId(), request.type()); - } - } catch (MapperParsingException | IllegalArgumentException failure) { - return new Engine.IndexResult(failure, request.version()); + // if a mapping update is required to index this request, issue a mapping update on the master, and abort the + // current indexing operation so that it can be retried with the updated mapping from the master + // The early abort uses the RetryOnPrimaryException, but any other exception would be fine as well. + return primary.applyIndexOperationOnPrimary(request.version(), request.versionType(), sourceToParse, + request.getAutoGeneratedTimestamp(), request.isRetry(), update -> { + mappingUpdater.updateMappings(update, primary.shardId(), sourceToParse.type()); + throw new ReplicationOperation.RetryOnPrimaryException(primary.shardId(), "Mapping updated"); + }); + } catch (ReplicationOperation.RetryOnPrimaryException e) { + return primary.applyIndexOperationOnPrimary(request.version(), request.versionType(), sourceToParse, + request.getAutoGeneratedTimestamp(), request.isRetry(), update -> mappingUpdater.verifyMappings(update, primary.shardId())); } - - // Verify that there are no more mappings that need to be applied. If there are failures, a - // ReplicationOperation.RetryOnPrimaryException is thrown. - final Engine.Index operation; - if (mappingUpdateNeeded) { - try { - operation = prepareIndexOperationOnPrimary(request, primary); - mappingUpdater.verifyMappings(operation.parsedDoc().dynamicMappingsUpdate(), primary.shardId()); - } catch (MapperParsingException | IllegalStateException e) { - // there was an error in parsing the document that was not because - // of pending mapping updates, so return a failure for the result - return new Engine.IndexResult(e, request.version()); - } - } else { - // There was no mapping update, the operation is the same as the pre-update version. - operation = preUpdateOperation; - } - - return primary.index(operation); } private static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary, - final MappingUpdatePerformer mappingUpdater) throws Exception { - boolean mappingUpdateNeeded = false; - if (primary.indexSettings().isSingleType()) { - // When there is a single type, the unique identifier is only composed of the _id, - // so there is no way to differenciate foo#1 from bar#1. This is especially an issue - // if a user first deletes foo#1 and then indexes bar#1: since we do not encode the - // _type in the uid it might look like we are reindexing the same document, which - // would fail if bar#1 is indexed with a lower version than foo#1 was deleted with. - // In order to work around this issue, we make deletions create types. This way, we - // fail if index and delete operations do not use the same type. 
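(Illustrative aside, not part of the patch: the two rewritten primary-side helpers, executeIndexRequestOnPrimary above and executeDeleteRequestOnPrimary just below, share one control-flow shape: attempt the operation, publish any required dynamic mapping update, abort with RetryOnPrimaryException, then retry exactly once against the updated mapping. The sketch distils that flow into plain Java; every name in it, MappingRetrySketch, Attempt, applyWithSingleRetry, RetryOnPrimary, is invented for the illustration and is not an Elasticsearch API.)

    // Self-contained sketch of the "apply, update mapping, retry once" flow.
    public class MappingRetrySketch {

        // Stand-in for ReplicationOperation.RetryOnPrimaryException in the real code.
        static class RetryOnPrimary extends RuntimeException {}

        interface Attempt<R> {
            // verifyOnly == false: publish any required mapping update, then abort with RetryOnPrimary.
            // verifyOnly == true: only verify that no further mapping update is pending.
            R run(boolean verifyOnly);
        }

        static <R> R applyWithSingleRetry(Attempt<R> attempt) {
            try {
                return attempt.run(false);   // first pass may trigger a mapping update and abort
            } catch (RetryOnPrimary e) {
                return attempt.run(true);    // second pass runs against the updated mapping
            }
        }

        public static void main(String[] args) {
            boolean[] mappingPublished = {false};
            String result = applyWithSingleRetry(verifyOnly -> {
                if (verifyOnly == false && mappingPublished[0] == false) {
                    mappingPublished[0] = true;   // pretend the update was sent to the master
                    throw new RetryOnPrimary();   // abort the first attempt
                }
                return "indexed";
            });
            System.out.println(result);           // prints "indexed"
        }
    }

(The update path earlier in this file applies the same bounded-retry idea to version conflicts, looping at most Math.max(1, retryOnConflict()) times before returning the last result.)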
- try { - Mapping update = primary.mapperService().documentMapperWithAutoCreate(request.type()).getMapping(); - if (update != null) { - mappingUpdateNeeded = true; + MappingUpdatePerformer mappingUpdater) throws Exception { + try { + return primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), request.versionType(), + update -> { mappingUpdater.updateMappings(update, primary.shardId(), request.type()); - } - } catch (MapperParsingException | IllegalArgumentException e) { - return new Engine.DeleteResult(e, request.version(), SequenceNumbersService.UNASSIGNED_SEQ_NO, false); - } + throw new ReplicationOperation.RetryOnPrimaryException(primary.shardId(), "Mapping updated"); + }); + } catch (ReplicationOperation.RetryOnPrimaryException e) { + return primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), request.versionType(), + update -> mappingUpdater.verifyMappings(update, primary.shardId())); } - if (mappingUpdateNeeded) { - Mapping update = primary.mapperService().documentMapperWithAutoCreate(request.type()).getMapping(); - mappingUpdater.verifyMappings(update, primary.shardId()); - } - final Engine.Delete delete = primary.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType()); - return primary.delete(delete); - } - - private static Engine.DeleteResult executeDeleteRequestOnReplica(DocWriteResponse primaryResponse, DeleteRequest request, - final long primaryTerm, IndexShard replica) throws Exception { - if (replica.indexSettings().isSingleType()) { - // We need to wait for the replica to have the mappings - Mapping update; - try { - update = replica.mapperService().documentMapperWithAutoCreate(request.type()).getMapping(); - } catch (MapperParsingException | IllegalArgumentException e) { - return new Engine.DeleteResult(e, request.version(), primaryResponse.getSeqNo(), false); - } - if (update != null) { - final ShardId shardId = replica.shardId(); - throw new RetryOnReplicaException(shardId, - "Mappings are not available on the replica yet, triggered update: " + update); - } - } - - final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery(); - final long version = primaryResponse.getVersion(); - assert versionType.validateVersionForWrites(version); - final Engine.Delete delete = replica.prepareDeleteOnReplica(request.type(), request.id(), - primaryResponse.getSeqNo(), primaryTerm, version, versionType); - return replica.delete(delete); - } - - private static Engine.NoOpResult executeFailureNoOpOnReplica(BulkItemResponse.Failure primaryFailure, long primaryTerm, - IndexShard replica) throws IOException { - final Engine.NoOp noOp = replica.prepareMarkingSeqNoAsNoOpOnReplica( - primaryFailure.getSeqNo(), primaryTerm, primaryFailure.getMessage()); - return replica.markSeqNoAsNoOp(noOp); } class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer { - public void updateMappings(final Mapping update, final ShardId shardId, - final String type) throws Exception { + public void updateMappings(final Mapping update, final ShardId shardId, final String type) { if (update != null) { // can throw timeout exception when updating mappings or ISE for attempting to // update default mappings which are bubbled up @@ -691,8 +576,7 @@ public class TransportShardBulkAction extends TransportWriteAction, ToXContentObject { +public class GetResponse extends ActionResponse implements Iterable, ToXContentObject { GetResult getResult; @@ -136,11 +138,11 @@ public 
class GetResponse extends ActionResponse implements Iterable, T return getResult.getSource(); } - public Map getFields() { + public Map getFields() { return getResult.getFields(); } - public GetField getField(String name) { + public DocumentField getField(String name) { return getResult.field(name); } @@ -149,7 +151,7 @@ public class GetResponse extends ActionResponse implements Iterable, T */ @Deprecated @Override - public Iterator iterator() { + public Iterator iterator() { return getResult.iterator(); } @@ -158,8 +160,32 @@ public class GetResponse extends ActionResponse implements Iterable, T return getResult.toXContent(builder, params); } + /** + * This method can be used to parse a {@link GetResponse} object when it has been printed out + * as a xcontent using the {@link #toXContent(XContentBuilder, Params)} method. + *

+ * For forward compatibility reasons this method might not fail if it tries to parse a field it + * doesn't know. But before returning the result it will check that enough information was + * parsed to return a valid {@link GetResponse} instance and throw a {@link ParsingException} + * otherwise. This is the case when we get a 404 back, which can be parsed as a normal + * {@link GetResponse} with found set to false, or as an Elasticsearch exception. The caller + * of this method needs a way to figure out whether we got back a valid get response, which + * can be done by catching ParsingException. + * + * @param parser {@link XContentParser} to parse the response from + * @return a {@link GetResponse} + * @throws IOException if an I/O exception occurs during the parsing + */ public static GetResponse fromXContent(XContentParser parser) throws IOException { GetResult getResult = GetResult.fromXContent(parser); + + // At this stage we ensure that we parsed enough information to return + // a valid GetResponse instance. If it's not the case, we throw an + // exception so that callers know it and can handle it correctly. + if (getResult.getIndex() == null && getResult.getType() == null && getResult.getId() == null) { + throw new ParsingException(parser.getTokenLocation(), + String.format(Locale.ROOT, "Missing required fields [%s,%s,%s]", GetResult._INDEX, GetResult._TYPE, GetResult._ID)); + } return new GetResponse(getResult); } diff --git a/core/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java new file mode 100644 index 00000000000..6f6382d7174 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
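(Illustrative aside, not part of the patch: one plausible caller-side view of the fromXContent contract documented above. Only GetResponse.fromXContent and the ParsingException it throws come from the change; the wrapper class, its name, and the decision to rethrow as IOException are assumptions made for the example.)

    import java.io.IOException;

    import org.elasticsearch.action.get.GetResponse;
    import org.elasticsearch.common.ParsingException;
    import org.elasticsearch.common.xcontent.XContentParser;

    class GetResponseParsingSketch {
        // Distinguish a valid get response (possibly with found=false, e.g. a 404 body)
        // from a body that is really an Elasticsearch exception.
        static GetResponse parseOrFail(XContentParser parser) throws IOException {
            try {
                return GetResponse.fromXContent(parser);
            } catch (ParsingException e) {
                // index, type and id were all missing, so this was not a get response at all
                throw new IOException("body did not contain a valid get response", e);
            }
        }
    }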
+ */ +package org.elasticsearch.action.resync; + +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; + +import java.io.IOException; +import java.util.List; + +public final class ResyncReplicationRequest extends ReplicatedWriteRequest { + + private List operations; + + ResyncReplicationRequest() { + super(); + } + + public ResyncReplicationRequest(ShardId shardId, List operations) { + super(shardId); + this.operations = operations; + } + + public List getOperations() { + return operations; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + operations = in.readList(Translog.Operation::readType); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(operations); + } + + @Override + public String toString() { + return "TransportResyncReplicationAction.Request{" + + "shardId=" + shardId + + ", timeout=" + timeout + + ", index='" + index + '\'' + + ", ops=" + operations.size() + + "}"; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/resync/ResyncReplicationResponse.java b/core/src/main/java/org/elasticsearch/action/resync/ResyncReplicationResponse.java new file mode 100644 index 00000000000..f3dbea04763 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/resync/ResyncReplicationResponse.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.resync; + +import org.elasticsearch.action.support.WriteResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse; + +public final class ResyncReplicationResponse extends ReplicationResponse implements WriteResponse { + + @Override + public void setForcedRefresh(boolean forcedRefresh) { + // ignore + } +} diff --git a/core/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/core/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java new file mode 100644 index 00000000000..8f535bfed26 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -0,0 +1,175 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
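(Aside on the wire format of the new request, not part of the patch: readFrom and writeTo above delegate to readList/writeList, so a round trip through a byte stream should reproduce the operation list. The sketch below is written in the style of a same-package test, since the no-arg constructor is package-private; BytesStreamOutput and bytes().streamInput() are existing Elasticsearch utilities, while the class and method names are invented.)

    import java.io.IOException;
    import java.util.List;

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.index.shard.ShardId;
    import org.elasticsearch.index.translog.Translog;

    // Would need to live in org.elasticsearch.action.resync to reach the package-private constructor.
    class ResyncRequestRoundTripSketch {
        static ResyncReplicationRequest roundTrip(ShardId shardId, List<Translog.Operation> ops) throws IOException {
            ResyncReplicationRequest original = new ResyncReplicationRequest(shardId, ops);
            BytesStreamOutput out = new BytesStreamOutput();
            original.writeTo(out);                                            // serialize shard id, timeout and operations
            ResyncReplicationRequest copy = new ResyncReplicationRequest();   // empty instance, fields read below
            copy.readFrom(out.bytes().streamInput());
            return copy;                                                      // copy.getOperations() mirrors ops
        }
    }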
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.resync; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportActions; +import org.elasticsearch.action.support.replication.ReplicationOperation; +import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.action.support.replication.TransportWriteAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.PrimaryReplicaSyncer; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportResponseHandler; +import org.elasticsearch.transport.TransportService; + +import java.util.function.Supplier; + +public class TransportResyncReplicationAction extends TransportWriteAction implements PrimaryReplicaSyncer.SyncAction { + + public static String ACTION_NAME = "indices:admin/seq_no/resync"; + + @Inject + public TransportResyncReplicationAction(Settings settings, TransportService transportService, + ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, + ShardStateAction shardStateAction, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + indexNameExpressionResolver, ResyncReplicationRequest::new, ResyncReplicationRequest::new, ThreadPool.Names.BULK); + } + + @Override + protected void registerRequestHandlers(String actionName, TransportService transportService, Supplier request, + Supplier replicaRequest, String executor) { + transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new OperationTransportHandler()); + // we should never reject resync because of thread pool capacity on primary + transportService.registerRequestHandler(transportPrimaryAction, + () -> new ConcreteShardRequest<>(request), + executor, true, true, + new PrimaryOperationTransportHandler()); + transportService.registerRequestHandler(transportReplicaAction, + () -> new ConcreteReplicaRequest<>(replicaRequest), + executor, true, true, + new ReplicaOperationTransportHandler()); + } + + @Override + protected ResyncReplicationResponse newResponseInstance() { + return new ResyncReplicationResponse(); + } + + @Override + protected ReplicationOperation.Replicas 
newReplicasProxy() { + // We treat the resync as best-effort for now and don't mark unavailable shard copies as stale. + return new ReplicasProxy(); + } + + @Override + protected void sendReplicaRequest( + final ConcreteReplicaRequest replicaRequest, + final DiscoveryNode node, + final ActionListener listener) { + if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { + super.sendReplicaRequest(replicaRequest, node, listener); + } else { + listener.onResponse(new ReplicaResponse(replicaRequest.getTargetAllocationID(), SequenceNumbersService.UNASSIGNED_SEQ_NO)); + } + } + + @Override + protected WritePrimaryResult shardOperationOnPrimary( + ResyncReplicationRequest request, IndexShard primary) throws Exception { + final ResyncReplicationRequest replicaRequest = performOnPrimary(request, primary); + return new WritePrimaryResult<>(replicaRequest, new ResyncReplicationResponse(), null, null, primary, logger); + } + + public static ResyncReplicationRequest performOnPrimary(ResyncReplicationRequest request, IndexShard primary) { + return request; + } + + @Override + protected WriteReplicaResult shardOperationOnReplica(ResyncReplicationRequest request, IndexShard replica) throws Exception { + Translog.Location location = performOnReplica(request, replica); + return new WriteReplicaResult(request, location, null, replica, logger); + } + + public static Translog.Location performOnReplica(ResyncReplicationRequest request, IndexShard replica) throws Exception { + Translog.Location location = null; + for (Translog.Operation operation : request.getOperations()) { + try { + final Engine.Result operationResult = replica.applyTranslogOperation(operation, Engine.Operation.Origin.REPLICA, + update -> { + throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(), + "Mappings are not available on the replica yet, triggered update: " + update); + }); + location = syncOperationResultOrThrow(operationResult, location); + } catch (Exception e) { + // if its not a failure to be ignored, let it bubble up + if (!TransportActions.isShardNotAvailableException(e)) { + throw e; + } + } + } + return location; + } + + @Override + public void sync(ResyncReplicationRequest request, Task parentTask, String primaryAllocationId, + ActionListener listener) { + // skip reroute phase + transportService.sendChildRequest( + clusterService.localNode(), + transportPrimaryAction, + new ConcreteShardRequest<>(request, primaryAllocationId), + parentTask, + transportOptions, + new TransportResponseHandler() { + @Override + public ResyncReplicationResponse newInstance() { + return newResponseInstance(); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public void handleResponse(ResyncReplicationResponse response) { + listener.onResponse(response); + } + + @Override + public void handleException(TransportException exp) { + final Throwable cause = exp.unwrapCause(); + if (TransportActions.isShardNotAvailableException(cause)) { + logger.trace("primary became unavailable during resync, ignoring", exp); + } else { + listener.onFailure(exp); + } + } + }); + } + +} diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 5ed41d0fe65..e75d52db3ef 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -71,7 +71,6 @@ public class 
SearchTransportService extends AbstractComponent { public static final String QUERY_ACTION_NAME = "indices:data/read/search[phase/query]"; public static final String QUERY_ID_ACTION_NAME = "indices:data/read/search[phase/query/id]"; public static final String QUERY_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query/scroll]"; - public static final String QUERY_FETCH_ACTION_NAME = "indices:data/read/search[phase/query+fetch]"; public static final String QUERY_FETCH_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query+fetch/scroll]"; public static final String FETCH_ID_SCROLL_ACTION_NAME = "indices:data/read/search[phase/fetch/id/scroll]"; public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]"; @@ -117,26 +116,11 @@ public class SearchTransportService extends AbstractComponent { public void sendExecuteQuery(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, final SearchActionListener listener) { // we optimize this and expect a QueryFetchSearchResult if we only have a single shard in the search request - // this used to be the QUERY_AND_FETCH which doesn't exists anymore. + // this used to be the QUERY_AND_FETCH which doesn't exist anymore. final boolean fetchDocuments = request.numberOfShards() == 1; Supplier supplier = fetchDocuments ? QueryFetchSearchResult::new : QuerySearchResult::new; - if (connection.getVersion().before(Version.V_5_3_0) && fetchDocuments) { - // this is a BWC layer for pre 5.3 indices - if (request.scroll() != null) { - /** - * This is needed for nodes pre 5.3 when the single shard optimization is used. - * These nodes will set the last emitted doc only if the removed `query_and_fetch` search type is set - * in the request. See {@link SearchType}. - */ - request.searchType(SearchType.QUERY_AND_FETCH); - } - // TODO this BWC layer can be removed once this is back-ported to 5.3 - transportService.sendChildRequest(connection, QUERY_FETCH_ACTION_NAME, request, task, - new ActionListenerResponseHandler<>(listener, supplier)); - } else { - transportService.sendChildRequest(connection, QUERY_ACTION_NAME, request, task, - new ActionListenerResponseHandler<>(listener, supplier)); - } + transportService.sendChildRequest(connection, QUERY_ACTION_NAME, request, task, + new ActionListenerResponseHandler<>(listener, supplier)); } public void sendExecuteQuery(Transport.Connection connection, final QuerySearchRequest request, SearchTask task, @@ -353,20 +337,6 @@ public class SearchTransportService extends AbstractComponent { }); TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, ScrollQuerySearchResult::new); - // this is for BWC with 5.3 until the QUERY_AND_FETCH removal change has been back-ported to 5.x - // in 5.3 we will only execute a `indices:data/read/search[phase/query+fetch]` if the node is pre 5.3 - // such that we can remove this after the back-port. 
- transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH, - new TaskAwareTransportRequestHandler() { - @Override - public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception { - assert request.numberOfShards() == 1 : "expected single shard request but got: " + request.numberOfShards(); - SearchPhaseResult result = searchService.executeQueryPhase(request, (SearchTask)task); - channel.sendResponse(result); - } - }); - TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_ACTION_NAME, QueryFetchSearchResult::new); - transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH, new TaskAwareTransportRequestHandler() { @Override diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 720fb17ae94..51681a62b3a 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -33,7 +33,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -184,7 +183,7 @@ public class TransportSearchAction extends HandledTransportAction indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState)); OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); if (remoteClusterIndices.isEmpty()) { - executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, Collections.emptyList(), + executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, remoteClusterIndices, Collections.emptyList(), (clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener); } else { remoteClusterService.collectSearchShards(searchRequest.indicesOptions(), searchRequest.preference(), searchRequest.routing(), @@ -193,7 +192,7 @@ public class TransportSearchAction extends HandledTransportAction remoteAliasFilters = new HashMap<>(); BiFunction clusterNodeLookup = processRemoteShards(searchShardsResponses, remoteClusterIndices, remoteShardIterators, remoteAliasFilters); - executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, remoteShardIterators, + executeSearch((SearchTask) task, timeProvider, searchRequest, localIndices, remoteClusterIndices, remoteShardIterators, clusterNodeLookup, clusterState, remoteAliasFilters, listener); }, listener::onFailure)); } @@ -249,16 +248,16 @@ public class TransportSearchAction extends HandledTransportAction remoteShardIterators, BiFunction remoteConnections, - ClusterState clusterState, Map remoteAliasMap, - ActionListener listener) { + Map remoteClusterIndices, List remoteShardIterators, + BiFunction remoteConnections, ClusterState clusterState, + Map remoteAliasMap, ActionListener listener) { clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name // date 
math expressions and $now in scripts. This way all apis will deal with now in the same way instead // of just for the _search api final Index[] indices; - if (localIndices.indices().length == 0 && remoteShardIterators.size() > 0) { + if (localIndices.indices().length == 0 && remoteClusterIndices.isEmpty() == false) { indices = Index.EMPTY_ARRAY; // don't search on _all if only remote indices were specified } else { indices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(), diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java index 4b1873e8d06..b8a5f3782bd 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java @@ -40,7 +40,6 @@ import java.util.Arrays; import java.util.List; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; /** * Base class for write action responses. @@ -187,8 +186,8 @@ public class ReplicationResponse extends ActionResponse { total = parser.intValue(); } else if (SUCCESSFUL.equals(currentFieldName)) { successful = parser.intValue(); - } else if (FAILED.equals(currentFieldName) == false) { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + } else { + parser.skipChildren(); } } else if (token == XContentParser.Token.START_ARRAY) { if (FAILURES.equals(currentFieldName)) { @@ -197,8 +196,10 @@ public class ReplicationResponse extends ActionResponse { failuresList.add(Failure.fromXContent(parser)); } } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); // skip potential inner arrays for forward compatibility } + } else if (token == XContentParser.Token.START_OBJECT) { + parser.skipChildren(); // skip potential inner arrays for forward compatibility } } Failure[] failures = EMPTY; @@ -365,15 +366,15 @@ public class ReplicationResponse extends ActionResponse { status = RestStatus.valueOf(parser.text()); } else if (PRIMARY.equals(currentFieldName)) { primary = parser.booleanValue(); - } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { if (REASON.equals(currentFieldName)) { reason = ElasticsearchException.fromXContent(parser); } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); // skip potential inner objects for forward compatibility } + } else if (token == XContentParser.Token.START_ARRAY) { + parser.skipChildren(); // skip potential inner arrays for forward compatibility } } return new Failure(new ShardId(shardIndex, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), nodeId, reason, status, primary); diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 946692f1826..b364870e23a 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -95,17 +95,17 @@ public abstract class TransportReplicationAction< Response extends ReplicationResponse > 
extends TransportAction { - private final TransportService transportService; + protected final TransportService transportService; protected final ClusterService clusterService; protected final ShardStateAction shardStateAction; - private final IndicesService indicesService; - private final TransportRequestOptions transportOptions; - private final String executor; + protected final IndicesService indicesService; + protected final TransportRequestOptions transportOptions; + protected final String executor; // package private for testing - private final String transportReplicaAction; - private final String transportPrimaryAction; - private final ReplicationOperation.Replicas replicasProxy; + protected final String transportReplicaAction; + protected final String transportPrimaryAction; + protected final ReplicationOperation.Replicas replicasProxy; protected TransportReplicationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, IndicesService indicesService, @@ -122,6 +122,15 @@ public abstract class TransportReplicationAction< this.transportPrimaryAction = actionName + "[p]"; this.transportReplicaAction = actionName + "[r]"; + registerRequestHandlers(actionName, transportService, request, replicaRequest, executor); + + this.transportOptions = transportOptions(); + + this.replicasProxy = newReplicasProxy(); + } + + protected void registerRequestHandlers(String actionName, TransportService transportService, Supplier request, + Supplier replicaRequest, String executor) { transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new OperationTransportHandler()); transportService.registerRequestHandler(transportPrimaryAction, () -> new ConcreteShardRequest<>(request), executor, new PrimaryOperationTransportHandler()); @@ -130,10 +139,6 @@ public abstract class TransportReplicationAction< () -> new ConcreteReplicaRequest<>(replicaRequest), executor, true, true, new ReplicaOperationTransportHandler()); - - this.transportOptions = transportOptions(); - - this.replicasProxy = newReplicasProxy(); } @Override @@ -178,7 +183,7 @@ public abstract class TransportReplicationAction< /** * Synchronously execute the specified replica operation. This is done under a permit from - * {@link IndexShard#acquireReplicaOperationPermit(long, ActionListener, String)}. + * {@link IndexShard#acquireReplicaOperationPermit(long, long, ActionListener, String)}. 
* * @param shardRequest the request to the replica shard * @param replica the replica shard to perform the operation on @@ -217,7 +222,12 @@ public abstract class TransportReplicationAction< || TransportActions.isShardNotAvailableException(e); } - class OperationTransportHandler implements TransportRequestHandler { + protected class OperationTransportHandler implements TransportRequestHandler { + + public OperationTransportHandler() { + + } + @Override public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception { execute(task, request, new ActionListener() { @@ -250,7 +260,12 @@ public abstract class TransportReplicationAction< } } - class PrimaryOperationTransportHandler implements TransportRequestHandler> { + protected class PrimaryOperationTransportHandler implements TransportRequestHandler> { + + public PrimaryOperationTransportHandler() { + + } + @Override public void messageReceived(final ConcreteShardRequest request, final TransportChannel channel) throws Exception { throw new UnsupportedOperationException("the task parameter is required for this operation"); @@ -314,7 +329,6 @@ public abstract class TransportReplicationAction< }); } else { setPhase(replicationTask, "primary"); - final IndexMetaData indexMetaData = clusterService.state().getMetaData().index(request.shardId().getIndex()); final ActionListener listener = createResponseListener(primaryShardReference); createReplicatedOperation(request, ActionListener.wrap(result -> result.respond(listener), listener::onFailure), @@ -437,7 +451,7 @@ public abstract class TransportReplicationAction< } } - class ReplicaOperationTransportHandler implements TransportRequestHandler> { + public class ReplicaOperationTransportHandler implements TransportRequestHandler> { @Override public void messageReceived( @@ -507,7 +521,6 @@ public abstract class TransportReplicationAction< @Override public void onResponse(Releasable releasable) { try { - replica.updateGlobalCheckpointOnReplica(globalCheckpoint); final ReplicaResult replicaResult = shardOperationOnReplica(request, replica); releasable.close(); // release shard operation lock before responding to caller final TransportReplicationAction.ReplicaResponse response = @@ -582,7 +595,7 @@ public abstract class TransportReplicationAction< throw new ShardNotFoundException(this.replica.shardId(), "expected aID [{}] but found [{}]", targetAllocationID, actualAllocationId); } - replica.acquireReplicaOperationPermit(request.primaryTerm, this, executor); + replica.acquireReplicaOperationPermit(request.primaryTerm, globalCheckpoint, this, executor); } /** @@ -1049,7 +1062,11 @@ public abstract class TransportReplicationAction< * shards. It also encapsulates the logic required for failing the replica * if deemed necessary as well as marking it as stale when needed. 
*/ - class ReplicasProxy implements ReplicationOperation.Replicas { + protected class ReplicasProxy implements ReplicationOperation.Replicas { + + public ReplicasProxy() { + + } @Override public void performOn( @@ -1112,13 +1129,13 @@ public abstract class TransportReplicationAction< private R request; - ConcreteShardRequest(Supplier requestSupplier) { + public ConcreteShardRequest(Supplier requestSupplier) { request = requestSupplier.get(); // null now, but will be populated by reading from the streams targetAllocationID = null; } - ConcreteShardRequest(R request, String targetAllocationID) { + public ConcreteShardRequest(R request, String targetAllocationID) { Objects.requireNonNull(request); Objects.requireNonNull(targetAllocationID); this.request = request; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 938e90b82b2..30f72e454df 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -23,6 +23,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -32,6 +33,11 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; @@ -43,6 +49,7 @@ import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -67,6 +74,37 @@ public abstract class TransportWriteAction< indexNameExpressionResolver, request, replicaRequest, executor); } + /** Syncs operation result to the translog or throws a shard not available failure */ + protected static Location syncOperationResultOrThrow(final Engine.Result operationResult, + final Location currentLocation) throws Exception { + final Location location; + if (operationResult.hasFailure()) { + // check if any transient write operation failures should be bubbled up + Exception failure = operationResult.getFailure(); + assert failure instanceof MapperParsingException : "expected mapper parsing failures. 
got " + failure; + if (!TransportActions.isShardNotAvailableException(failure)) { + throw failure; + } else { + location = currentLocation; + } + } else { + location = locationToSync(currentLocation, operationResult.getTranslogLocation()); + } + return location; + } + + protected static Location locationToSync(Location current, Location next) { + /* here we are moving forward in the translog with each operation. Under the hood this might + * cross translog files which is ok since from the user perspective the translog is like a + * tape where only the highest location needs to be fsynced in order to sync all previous + * locations even though they are not in the same file. When the translog rolls over files + * the previous file is fsynced on after closing if needed.*/ + assert next != null : "next operation can't be null"; + assert current == null || current.compareTo(next) < 0 : + "translog locations are not increasing"; + return next; + } + @Override protected ReplicationOperation.Replicas newReplicasProxy() { return new WriteActionReplicasProxy(); @@ -356,8 +394,8 @@ public abstract class TransportWriteAction< createListener(onSuccess, onPrimaryDemoted, onIgnoredFailure)); } - public ShardStateAction.Listener createListener(final Runnable onSuccess, final Consumer onPrimaryDemoted, - final Consumer onIgnoredFailure) { + private ShardStateAction.Listener createListener(final Runnable onSuccess, final Consumer onPrimaryDemoted, + final Consumer onIgnoredFailure) { return new ShardStateAction.Listener() { @Override public void onSuccess() { diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java b/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java index 4ddbe541993..fdbd8e6fe70 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java @@ -44,9 +44,6 @@ public class BaseTasksResponse extends ActionResponse { private List taskFailures; private List nodeFailures; - public BaseTasksResponse() { - } - public BaseTasksResponse(List taskFailures, List nodeFailures) { this.taskFailures = taskFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(taskFailures)); this.nodeFailures = nodeFailures == null ? 
Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(nodeFailures)); diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 189803f818f..a9d0e305f14 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -184,7 +184,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio (request.fields() != null && request.fields().length > 0)) { Tuple> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true, upsertRequest.getContentType()); - update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes)); + update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes)); } else { update.setGetResult(null); } @@ -201,7 +201,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio bulkAction.execute(toSingleItemBulkRequest(indexRequest), wrapBulkResponse( ActionListener.wrap(response -> { UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); - update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); + update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); }, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount))) @@ -212,7 +212,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio bulkAction.execute(toSingleItemBulkRequest(deleteRequest), wrapBulkResponse( ActionListener.wrap(response -> { UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); - update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); + update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); }, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount))) diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 6d3098c5caa..fbf005415d9 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; @@ -38,7 +39,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.DocumentMissingException; import org.elasticsearch.index.engine.DocumentSourceMissingException; -import org.elasticsearch.index.get.GetField; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; @@ -314,8 +314,9 @@ public class UpdateHelper extends AbstractComponent { * Applies {@link UpdateRequest#fetchSource()} to the _source of the updated document to be returned in a update response. * For BWC this function also extracts the {@link UpdateRequest#fields()} from the updated document to be returned in a update response */ - public GetResult extractGetResult(final UpdateRequest request, String concreteIndex, long version, final Map source, - XContentType sourceContentType, @Nullable final BytesReference sourceAsBytes) { + public static GetResult extractGetResult(final UpdateRequest request, String concreteIndex, long version, + final Map source, XContentType sourceContentType, + @Nullable final BytesReference sourceAsBytes) { if ((request.fields() == null || request.fields().length == 0) && (request.fetchSource() == null || request.fetchSource().fetchSource() == false)) { return null; @@ -323,7 +324,7 @@ public class UpdateHelper extends AbstractComponent { SourceLookup sourceLookup = new SourceLookup(); sourceLookup.setSource(source); boolean sourceRequested = false; - Map fields = null; + Map fields = null; if (request.fields() != null && request.fields().length > 0) { for (String field : request.fields()) { if (field.equals("_source")) { @@ -335,12 +336,12 @@ public class UpdateHelper extends AbstractComponent { if (fields == null) { fields = new HashMap<>(2); } - GetField getField = fields.get(field); - if (getField == null) { - getField = new GetField(field, new ArrayList<>(2)); - fields.put(field, getField); + DocumentField documentField = fields.get(field); + if (documentField == null) { + documentField = new DocumentField(field, new ArrayList<>(2)); + fields.put(field, documentField); } - getField.getValues().add(value); + documentField.getValues().add(value); } } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/analysis/common/AbstractCompoundWordTokenFilterFactory.java similarity index 93% rename from core/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java rename to core/src/main/java/org/elasticsearch/analysis/common/AbstractCompoundWordTokenFilterFactory.java index 91c984c7a6b..b59cc166f09 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/analysis/common/AbstractCompoundWordTokenFilterFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis.compound; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.compound.CompoundWordTokenFilterBase; @@ -38,7 +38,7 @@ public abstract class AbstractCompoundWordTokenFilterFactory extends AbstractTok protected final boolean onlyLongestMatch; protected final CharArraySet wordList; - public AbstractCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { + protected AbstractCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); minWordSize = settings.getAsInt("min_word_size", CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 74fc600d627..ffe5dfa3e4b 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -238,9 +238,12 @@ final class Bootstrap { return keystore; } - - private static Environment createEnvironment(boolean foreground, Path pidFile, - SecureSettings secureSettings, Settings initialSettings) { + private static Environment createEnvironment( + final boolean foreground, + final Path pidFile, + final SecureSettings secureSettings, + final Settings initialSettings, + final Path configPath) { Terminal terminal = foreground ? Terminal.DEFAULT : null; Settings.Builder builder = Settings.builder(); if (pidFile != null) { @@ -250,7 +253,7 @@ final class Bootstrap { if (secureSettings != null) { builder.setSecureSettings(secureSettings); } - return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, Collections.emptyMap()); + return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, Collections.emptyMap(), configPath); } private void start() throws NodeValidationException { @@ -266,13 +269,6 @@ final class Bootstrap { } } - /** Set the system property before anything has a chance to trigger its use */ - // TODO: why? is it just a bad default somewhere? or is it some BS around 'but the client' garbage <-- my guess - @SuppressForbidden(reason = "sets logger prefix on initialization") - static void initLoggerPrefix() { - System.setProperty("es.logger.prefix", ""); - } - /** * This method is invoked by {@link Elasticsearch#main(String[])} to startup elasticsearch. 
*/ @@ -281,9 +277,6 @@ final class Bootstrap { final Path pidFile, final boolean quiet, final Environment initialEnv) throws BootstrapException, NodeValidationException, UserException { - // Set the system property before anything has a chance to trigger its use - initLoggerPrefix(); - // force the class initializer for BootstrapInfo to run before // the security manager is installed BootstrapInfo.init(); @@ -291,7 +284,7 @@ final class Bootstrap { INSTANCE = new Bootstrap(); final SecureSettings keystore = loadSecureSettings(initialEnv); - Environment environment = createEnvironment(foreground, pidFile, keystore, initialEnv.settings()); + final Environment environment = createEnvironment(foreground, pidFile, keystore, initialEnv.settings(), initialEnv.configFile()); try { LogConfigurator.configure(environment); } catch (IOException e) { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java index 71a7fad6232..99574c2b39b 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java @@ -24,6 +24,7 @@ import com.sun.jna.Native; import com.sun.jna.NativeLong; import com.sun.jna.Pointer; import com.sun.jna.Structure; +import com.sun.jna.WString; import com.sun.jna.win32.StdCallLibrary; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; @@ -223,6 +224,17 @@ final class JNAKernel32Library { */ native boolean CloseHandle(Pointer handle); + /** + * Retrieves the short path form of the specified path. See + * {@code GetShortPathName}. + * + * @param lpszLongPath the path string + * @param lpszShortPath a buffer to receive the short name + * @param cchBuffer the size of the buffer + * @return the length of the string copied into {@code lpszShortPath}, otherwise zero for failure + */ + native int GetShortPathNameW(WString lpszLongPath, char[] lpszShortPath, int cchBuffer); + /** * Creates or opens a new job object * diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java index d4e11af71ac..b28cc398249 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java @@ -21,6 +21,7 @@ package org.elasticsearch.bootstrap; import com.sun.jna.Native; import com.sun.jna.Pointer; +import com.sun.jna.WString; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.elasticsearch.common.logging.Loggers; @@ -194,6 +195,35 @@ class JNANatives { } } + /** + * Retrieves the short path form of the specified path. 
+ * + * @param path the path + * @return the short path name (or the original path if getting the short path name fails for any reason) + */ + static String getShortPathName(String path) { + assert Constants.WINDOWS; + try { + final WString longPath = new WString("\\\\?\\" + path); + // first we get the length of the buffer needed + final int length = JNAKernel32Library.getInstance().GetShortPathNameW(longPath, null, 0); + if (length == 0) { + logger.warn("failed to get short path name: {}", Native.getLastError()); + return path; + } + final char[] shortPath = new char[length]; + // knowing the length of the buffer, now we get the short name + if (JNAKernel32Library.getInstance().GetShortPathNameW(longPath, shortPath, length) > 0) { + return Native.toString(shortPath); + } else { + logger.warn("failed to get short path name: {}", Native.getLastError()); + return path; + } + } catch (final UnsatisfiedLinkError e) { + return path; + } + } + static void addConsoleCtrlHandler(ConsoleCtrlHandler handler) { // The console Ctrl handler is necessary on Windows platforms only. if (Constants.WINDOWS) { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Natives.java b/core/src/main/java/org/elasticsearch/bootstrap/Natives.java index ad6ec985ca1..6dae75e63be 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Natives.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Natives.java @@ -76,6 +76,20 @@ final class Natives { JNANatives.tryVirtualLock(); } + /** + * Retrieves the short path form of the specified path. + * + * @param path the path + * @return the short path name (or the original path if getting the short path name fails for any reason) + */ + static String getShortPathName(final String path) { + if (!JNA_AVAILABLE) { + logger.warn("cannot obtain short path for [{}] because JNA is not available", path); + return path; + } + return JNANatives.getShortPathName(path); + } + static void addConsoleCtrlHandler(ConsoleCtrlHandler handler) { if (!JNA_AVAILABLE) { logger.warn("cannot register console handler because JNA is not available"); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Security.java b/core/src/main/java/org/elasticsearch/bootstrap/Security.java index 5ffb89b6ee4..9504bdefa59 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -256,7 +256,7 @@ final class Security { addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), "read,readlink"); addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink"); addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsFile(), "read,readlink"); - addPath(policy, Environment.PATH_CONF_SETTING.getKey(), environment.configFile(), "read,readlink"); + addPath(policy, "path.conf", environment.configFile(), "read,readlink"); // read-write dirs addPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete"); addPath(policy, Environment.PATH_LOGS_SETTING.getKey(), environment.logsFile(), "read,readlink,write,delete"); @@ -280,26 +280,6 @@ final class Security { throw new IllegalStateException("unable to access [" + path + "]", e); } } - /* - * If path.data and default.path.data are set, we need read access to the paths in default.path.data to check for the existence of - * index directories there that could have arisen from a bug in the handling of simultaneous configuration of path.data and - * default.path.data that was
introduced in Elasticsearch 5.3.0. - * - * If path.data is not set then default.path.data would take precedence in setting the data paths for the environment and - * permissions would have been granted above. - * - * If path.data is not set and default.path.data is not set, then we would fallback to the default data directory under - * Elasticsearch home and again permissions would have been granted above. - * - * If path.data is set and default.path.data is not set, there is nothing to do here. - */ - if (Environment.PATH_DATA_SETTING.exists(environment.settings()) - && Environment.DEFAULT_PATH_DATA_SETTING.exists(environment.settings())) { - for (final String path : Environment.DEFAULT_PATH_DATA_SETTING.get(environment.settings())) { - // write permissions are not needed here, we are not going to be writing to any paths here - addPath(policy, Environment.DEFAULT_PATH_DATA_SETTING.getKey(), getPath(path), "read,readlink"); - } - } for (Path path : environment.repoFiles()) { addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete"); } @@ -309,11 +289,6 @@ final class Security { } } - @SuppressForbidden(reason = "read path that is not configured in environment") - private static Path getPath(final String path) { - return PathUtils.get(path); - } - /** * Add dynamic {@link SocketPermission}s based on HTTP and transport settings. * @@ -427,27 +402,6 @@ final class Security { policy.add(new FilePermission(path.toString() + path.getFileSystem().getSeparator() + "-", permissions)); } - /** - * Add access to a directory iff it exists already - * @param policy current policy to add permissions to - * @param configurationName the configuration name associated with the path (for error messages only) - * @param path the path itself - * @param permissions set of file permissions to grant to the path - */ - static void addPathIfExists(Permissions policy, String configurationName, Path path, String permissions) { - if (Files.isDirectory(path)) { - // add each path twice: once for itself, again for files underneath it - policy.add(new FilePermission(path.toString(), permissions)); - policy.add(new FilePermission(path.toString() + path.getFileSystem().getSeparator() + "-", permissions)); - try { - path.getFileSystem().provider().checkAccess(path.toRealPath(), AccessMode.READ); - } catch (IOException e) { - throw new IllegalStateException("Unable to access '" + configurationName + "' (" + path + ")", e); - } - } - } - - /** * Ensures configured directory {@code path} exists. * @throws IOException if {@code path} exists, but is not a directory, not accessible, or broken symbolic link. 
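// Note on the Security#addPath hunks above: they rely on the Java policy convention that a FilePermission on a
// directory does not extend to the files beneath it, so each directory is granted twice, once for itself and once
// with the "/-" suffix for everything underneath it. A minimal, self-contained sketch of that idiom follows; it
// assumes only the JDK, and the class, method, and path names here are illustrative rather than part of this change.
import java.io.FilePermission;
import java.nio.file.FileSystems;
import java.security.Permissions;

final class RecursiveReadGrant {
    static Permissions grant(final String dir) {
        final Permissions policy = new Permissions();
        final String separator = FileSystems.getDefault().getSeparator();
        // the directory itself
        policy.add(new FilePermission(dir, "read,readlink"));
        // every file and subdirectory beneath it; "-" is FilePermission's recursive wildcard
        policy.add(new FilePermission(dir + separator + "-", "read,readlink"));
        return policy;
    }
}
// For example, grant("/etc/elasticsearch") would permit reads of that directory and of any file below it, which
// mirrors what addPath does for each configured read-only or read-write directory.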
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Spawner.java b/core/src/main/java/org/elasticsearch/bootstrap/Spawner.java index 77cadaa9043..f1616ba0eea 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Spawner.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Spawner.java @@ -19,6 +19,7 @@ package org.elasticsearch.bootstrap; +import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Platforms; @@ -99,7 +100,22 @@ final class Spawner implements Closeable { private Process spawnNativePluginController( final Path spawnPath, final Path tmpPath) throws IOException { - final ProcessBuilder pb = new ProcessBuilder(spawnPath.toString()); + final String command; + if (Constants.WINDOWS) { + /* + * We have to get the short path name or starting the process could fail due to max path limitations. The underlying issue here + * is that starting the process on Windows ultimately involves the use of CreateProcessW. CreateProcessW has a limitation that + * if its first argument (the application name) is null, then its second argument (the command line for the process to start) is + * restricted in length to 260 characters (cf. https://msdn.microsoft.com/en-us/library/windows/desktop/ms682425.aspx). Since + * this is exactly how the JDK starts the process on Windows (cf. + * http://hg.openjdk.java.net/jdk8/jdk8/jdk/file/687fd7c7986d/src/windows/native/java/lang/ProcessImpl_md.c#l319), this + * limitation is in force. As such, we use the short name to avoid any such problems. + */ + command = Natives.getShortPathName(spawnPath.toString()); + } else { + command = spawnPath.toString(); + } + final ProcessBuilder pb = new ProcessBuilder(command); // the only environment variable passes on the path to the temporary directory pb.environment().clear(); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java b/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java index d9ca9698717..c9971a8a72a 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java @@ -242,7 +242,6 @@ final class SystemCallFilter { static { Map m = new HashMap<>(); m.put("amd64", new Arch(0xC000003E, 0x3FFFFFFF, 57, 58, 59, 322, 317)); - m.put("i386", new Arch(0x40000003, 0xFFFFFFFF, 2, 190, 11, 358, 354)); ARCHITECTURES = Collections.unmodifiableMap(m); } diff --git a/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java b/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java index 79a4fd7329f..e06d227a24c 100644 --- a/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java +++ b/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java @@ -22,10 +22,14 @@ package org.elasticsearch.cli; import joptsimple.OptionSet; import joptsimple.OptionSpec; import joptsimple.util.KeyValuePair; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.node.InternalSettingsPreparer; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; import java.util.HashMap; import java.util.Locale; import java.util.Map; @@ -34,10 +38,13 @@ import java.util.Map; public abstract class EnvironmentAwareCommand extends Command { private final OptionSpec settingOption; + private final OptionSpec pathConfOption; public 
EnvironmentAwareCommand(String description) { super(description); this.settingOption = parser.accepts("E", "Configure a setting").withRequiredArg().ofType(KeyValuePair.class); + this.pathConfOption = + parser.acceptsAll(Arrays.asList("c", "path.conf"), "Configure config path").withRequiredArg().ofType(String.class); } @Override @@ -59,17 +66,22 @@ public abstract class EnvironmentAwareCommand extends Command { settings.put(kvp.key, kvp.value); } - putSystemPropertyIfSettingIsMissing(settings, "path.conf", "es.path.conf"); putSystemPropertyIfSettingIsMissing(settings, "path.data", "es.path.data"); putSystemPropertyIfSettingIsMissing(settings, "path.home", "es.path.home"); putSystemPropertyIfSettingIsMissing(settings, "path.logs", "es.path.logs"); - execute(terminal, options, createEnv(terminal, settings)); + final String pathConf = pathConfOption.value(options); + execute(terminal, options, createEnv(terminal, settings, getConfigPath(pathConf))); + } + + @SuppressForbidden(reason = "need path to construct environment") + private static Path getConfigPath(final String pathConf) { + return pathConf == null ? null : Paths.get(pathConf); } /** Create an {@link Environment} for the command to use. Overrideable for tests. */ - protected Environment createEnv(Terminal terminal, Map settings) { - return InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings); + protected Environment createEnv(Terminal terminal, Map settings, Path configPath) { + return InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings, configPath); } /** Ensure the given setting exists, reading it from system properties if not already set. */ diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 5879f1e3579..1cf79c26d9d 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -97,7 +97,7 @@ public abstract class TransportClient extends AbstractClient { .put(InternalSettingsPreparer.prepareSettings(settings)) .put(NetworkService.NETWORK_SERVER.getKey(), false) .put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE); - return new PluginsService(settingsBuilder.build(), null, null, plugins); + return new PluginsService(settingsBuilder.build(), null, null, null, plugins); } protected static Collection> addPlugins(Collection> collection, diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 800304a95ac..56311455a0e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.action.index; +import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.client.IndicesAdminClient; @@ -34,8 +35,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; -import java.util.concurrent.TimeoutException; - /** * Called by shards in the cluster when their mapping was dynamically updated and it needs to be updated * in the cluster state meta data 
(and broadcast to all members). @@ -77,7 +76,7 @@ public class MappingUpdatedAction extends AbstractComponent { * Same as {@link #updateMappingOnMaster(Index, String, Mapping, TimeValue)} * using the default timeout. */ - public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate) throws Exception { + public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate) { updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout); } @@ -86,9 +85,9 @@ public class MappingUpdatedAction extends AbstractComponent { * {@code timeout}. When this method returns successfully mappings have * been applied to the master node and propagated to data nodes. */ - public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception { + public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, TimeValue timeout) { if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) { - throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]"); + throw new ElasticsearchTimeoutException("Failed to acknowledge mapping update within [" + timeout + "]"); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 591b83c0eff..c11bca5cfc5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.LongArrayList; import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.Version; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.Diff; @@ -259,6 +258,13 @@ public class IndexMetaData implements Diffable, ToXContent { Setting.Property.Dynamic, Setting.Property.IndexScope); + /** + * an internal index format description, allowing us to find out if this index is upgraded or needs upgrading + */ + private static final String INDEX_FORMAT = "index.format"; + public static final Setting INDEX_FORMAT_SETTING = + Setting.intSetting(INDEX_FORMAT, 0, Setting.Property.IndexScope, Setting.Property.Final); + public static final String KEY_IN_SYNC_ALLOCATIONS = "in_sync_allocations"; static final String KEY_VERSION = "version"; static final String KEY_ROUTING_NUM_SHARDS = "routing_num_shards"; @@ -1051,6 +1057,7 @@ public class IndexMetaData implements Diffable, ToXContent { } final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); + return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards); @@ -1318,7 +1325,7 @@ public class IndexMetaData implements Diffable, ToXContent { * @param sourceIndexMetadata the metadata of the source index * @param targetNumberOfShards the total number of shards in the target index * @return the routing factor for and shrunk index with the given number of target shards. 
- * @throws IllegalArgumentException if the number of source shards is greater than the number of target shards or if the source shards + * @throws IllegalArgumentException if the number of source shards is less than the number of target shards or if the source shards * are not divisible by the number of target shards. */ public static int getRoutingFactor(IndexMetaData sourceIndexMetadata, int targetNumberOfShards) { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 0841dd3c6bf..8a3d53a1d12 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -29,8 +29,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.FormatDateTimeFormatter; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; @@ -58,8 +56,6 @@ public class IndexNameExpressionResolver extends AbstractComponent { private final List expressionResolvers; private final DateMathExpressionResolver dateMathExpressionResolver; - private static final DeprecationLogger DEPRECATION_LOGGER = - new DeprecationLogger(Loggers.getLogger(IndexNameExpressionResolver.class)); public IndexNameExpressionResolver(Settings settings) { super(settings); @@ -592,7 +588,6 @@ public class IndexNameExpressionResolver extends AbstractComponent { private Set innerResolve(Context context, List expressions, IndicesOptions options, MetaData metaData) { Set result = null; boolean wildcardSeen = false; - boolean plusSeen = false; for (int i = 0; i < expressions.size(); i++) { String expression = expressions.get(i); if (aliasOrIndexExists(metaData, expression)) { @@ -605,14 +600,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { throw infe(expression); } boolean add = true; - if (expression.charAt(0) == '+') { - // if its the first, add empty result set - plusSeen = true; - if (i == 0) { - result = new HashSet<>(); - } - expression = expression.substring(1); - } else if (expression.charAt(0) == '-') { + if (expression.charAt(0) == '-') { // if there is a negation without a wildcard being previously seen, add it verbatim, // otherwise return the expression if (wildcardSeen) { @@ -655,9 +643,6 @@ public class IndexNameExpressionResolver extends AbstractComponent { wildcardSeen = true; } } - if (plusSeen) { - DEPRECATION_LOGGER.deprecated("support for '+' as part of index expressions is deprecated"); - } return result; } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index b22106d9710..cae2042f52f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -387,6 +387,14 @@ public class IndexTemplateMetaData extends AbstractDiffable { /** * Converts the serialized compressed form of the mappings into a parsed map. 
*/ - public Map sourceAsMap() throws IOException { + public Map sourceAsMap() throws ElasticsearchParseException { Map mapping = XContentHelper.convertToMap(source.compressedReference(), true).v2(); if (mapping.size() == 1 && mapping.containsKey(type())) { // the type name is the root value, reduce it @@ -182,7 +183,7 @@ public class MappingMetaData extends AbstractDiffable { /** * Converts the serialized compressed form of the mappings into a parsed map. */ - public Map getSourceAsMap() throws IOException { + public Map getSourceAsMap() throws ElasticsearchParseException { return sourceAsMap(); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 43c13087dd0..0a2830e55fc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -91,6 +91,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.Predicate; +import java.util.stream.IntStream; import static org.elasticsearch.action.support.ContextPreservingActionListener.wrapPreservingContext; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS; @@ -340,19 +341,44 @@ public class MetaDataCreateIndexService extends AbstractComponent { indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName()); indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); final Index shrinkFromIndex = request.shrinkFrom(); - int routingNumShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexSettingsBuilder.build());; - if (shrinkFromIndex != null) { - prepareShrinkIndexSettings(currentState, mappings.keySet(), indexSettingsBuilder, shrinkFromIndex, - request.index()); - IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex); + final IndexMetaData.Builder tmpImdBuilder = IndexMetaData.builder(request.index()); + + final int routingNumShards; + if (shrinkFromIndex == null) { + routingNumShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexSettingsBuilder.build()); + } else { + final IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex); routingNumShards = sourceMetaData.getRoutingNumShards(); } + tmpImdBuilder.setRoutingNumShards(routingNumShards); + + if (shrinkFromIndex != null) { + prepareShrinkIndexSettings( + currentState, mappings.keySet(), indexSettingsBuilder, shrinkFromIndex, request.index()); + } + final Settings actualIndexSettings = indexSettingsBuilder.build(); + tmpImdBuilder.settings(actualIndexSettings); + + if (shrinkFromIndex != null) { + /* + * We need to arrange that the primary term on all the shards in the shrunken index is at least as large as + * the maximum primary term on all the shards in the source index. This ensures that we have correct + * document-level semantics regarding sequence numbers in the shrunken index. 
+ */ + final IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex); + final long primaryTerm = + IntStream + .range(0, sourceMetaData.getNumberOfShards()) + .mapToLong(sourceMetaData::primaryTerm) + .max() + .getAsLong(); + for (int shardId = 0; shardId < tmpImdBuilder.numberOfShards(); shardId++) { + tmpImdBuilder.primaryTerm(shardId, primaryTerm); + } + } - Settings actualIndexSettings = indexSettingsBuilder.build(); - IndexMetaData.Builder tmpImdBuilder = IndexMetaData.builder(request.index()) - .setRoutingNumShards(routingNumShards); // Set up everything, now locally create the index to see that things are ok, and apply - final IndexMetaData tmpImd = tmpImdBuilder.settings(actualIndexSettings).build(); + final IndexMetaData tmpImd = tmpImdBuilder.build(); ActiveShardCount waitForActiveShards = request.waitForActiveShards(); if (waitForActiveShards == ActiveShardCount.DEFAULT) { waitForActiveShards = tmpImd.getWaitForActiveShards(); @@ -408,6 +434,11 @@ public class MetaDataCreateIndexService extends AbstractComponent { final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index()) .settings(actualIndexSettings) .setRoutingNumShards(routingNumShards); + + for (int shardId = 0; shardId < tmpImd.getNumberOfShards(); shardId++) { + indexMetaDataBuilder.primaryTerm(shardId, tmpImd.primaryTerm(shardId)); + } + for (MappingMetaData mappingMd : mappingsMetaData.values()) { indexMetaDataBuilder.putMapping(mappingMd); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java new file mode 100644 index 00000000000..5f3b9cdf2da --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java @@ -0,0 +1,268 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.indices.IndexTemplateMissingException; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.UnaryOperator; + +import static java.util.Collections.singletonMap; + +/** + * Upgrades Templates on behalf of installed {@link Plugin}s when a node joins the cluster + */ +public class TemplateUpgradeService extends AbstractComponent implements ClusterStateListener { + private final UnaryOperator> indexTemplateMetaDataUpgraders; + + public final ClusterService clusterService; + + public final ThreadPool threadPool; + + public final Client client; + + private final AtomicInteger updatesInProgress = new AtomicInteger(); + + private ImmutableOpenMap lastTemplateMetaData; + + public TemplateUpgradeService(Settings settings, Client client, ClusterService clusterService, ThreadPool threadPool, + Collection>> indexTemplateMetaDataUpgraders) { + super(settings); + this.client = client; + this.clusterService = clusterService; + this.threadPool = threadPool; + this.indexTemplateMetaDataUpgraders = templates -> { + Map upgradedTemplates = new HashMap<>(templates); + for (UnaryOperator> upgrader : indexTemplateMetaDataUpgraders) { + upgradedTemplates = upgrader.apply(upgradedTemplates); + } + return upgradedTemplates; + }; + clusterService.addListener(this); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + ClusterState state = event.state(); + if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + // wait until the gateway has recovered from disk, otherwise we think may not have the index templates, + // while they actually do exist + return; + } + + if (updatesInProgress.get() > 0) { + 
// we are already running some updates - skip this cluster state update + return; + } + + ImmutableOpenMap templates = state.getMetaData().getTemplates(); + + if (templates == lastTemplateMetaData) { + // we already checked these sets of templates - no reason to check it again + // we can do identity check here because due to cluster state diffs the actual map will not change + // if there were no changes + return; + } + + if (shouldLocalNodeUpdateTemplates(state.nodes()) == false) { + return; + } + + lastTemplateMetaData = templates; + Optional, Set>> changes = calculateTemplateChanges(templates); + if (changes.isPresent()) { + logger.info("Starting template upgrade to version {}, {} templates will be updated and {} will be removed", + Version.CURRENT, + changes.get().v1().size(), + changes.get().v2().size()); + if (updatesInProgress.compareAndSet(0, changes.get().v1().size() + changes.get().v2().size())) { + threadPool.generic().execute(() -> updateTemplates(changes.get().v1(), changes.get().v2())); + } + } + } + + /** + * Checks if the current node should update the templates + * + * If the master has the newest version in the cluster - it will be the dedicated template updater. + * Otherwise the node with the highest id among nodes with the highest version should update the templates + */ + boolean shouldLocalNodeUpdateTemplates(DiscoveryNodes nodes) { + DiscoveryNode localNode = nodes.getLocalNode(); + // Only data and master nodes should update the template + if (localNode.isDataNode() || localNode.isMasterNode()) { + DiscoveryNode masterNode = nodes.getMasterNode(); + if (masterNode == null) { + return false; + } + Version maxVersion = nodes.getLargestNonClientNodeVersion(); + if (maxVersion.equals(masterNode.getVersion())) { + // If the master has the latest version - we will allow it to handle the update + return nodes.isLocalNodeElectedMaster(); + } else { + if (maxVersion.equals(localNode.getVersion()) == false) { + // The local node doesn't have the latest version - not going to update + return false; + } + for (ObjectCursor node : nodes.getMasterAndDataNodes().values()) { + if (node.value.getVersion().equals(maxVersion) && node.value.getId().compareTo(localNode.getId()) > 0) { + // We have a node with higher id than mine - it should update + return false; + } + } + // We have the highest version and highest id - we should perform the update + return true; + } + } else { + return false; + } + } + + void updateTemplates(Map changes, Set deletions) { + for (Map.Entry change : changes.entrySet()) { + PutIndexTemplateRequest request = + new PutIndexTemplateRequest(change.getKey()).source(change.getValue(), XContentType.JSON); + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + client.admin().indices().putTemplate(request, new ActionListener() { + @Override + public void onResponse(PutIndexTemplateResponse response) { + if (updatesInProgress.decrementAndGet() == 0) { + logger.info("Finished upgrading templates to version {}", Version.CURRENT); + } + if (response.isAcknowledged() == false) { + logger.warn("Error updating template [{}], request was not acknowledged", change.getKey()); + } + } + + @Override + public void onFailure(Exception e) { + if (updatesInProgress.decrementAndGet() == 0) { + logger.info("Templates were upgraded to version {}", Version.CURRENT); + } + logger.warn(new ParameterizedMessage("Error updating template [{}]", change.getKey()), e); + } + }); + } + + for (String template : deletions) { + DeleteIndexTemplateRequest request = new
DeleteIndexTemplateRequest(template); + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + client.admin().indices().deleteTemplate(request, new ActionListener() { + @Override + public void onResponse(DeleteIndexTemplateResponse response) { + updatesInProgress.decrementAndGet(); + if (response.isAcknowledged() == false) { + logger.warn("Error deleting template [{}], request was not acknowledged", template); + } + } + + @Override + public void onFailure(Exception e) { + updatesInProgress.decrementAndGet(); + if (e instanceof IndexTemplateMissingException == false) { + // we might attempt to delete the same template from different nodes - so that's ok if template doesn't exist + // otherwise we need to warn + logger.warn(new ParameterizedMessage("Error deleting template [{}]", template), e); + } + } + }); + } + } + + int getUpdatesInProgress() { + return updatesInProgress.get(); + } + + Optional, Set>> calculateTemplateChanges( + ImmutableOpenMap templates) { + // collect current templates + Map existingMap = new HashMap<>(); + for (ObjectObjectCursor customCursor : templates) { + existingMap.put(customCursor.key, customCursor.value); + } + // upgrade global custom meta data + Map upgradedMap = indexTemplateMetaDataUpgraders.apply(existingMap); + if (upgradedMap.equals(existingMap) == false) { + Set deletes = new HashSet<>(); + Map changes = new HashMap<>(); + // remove templates if needed + existingMap.keySet().forEach(s -> { + if (upgradedMap.containsKey(s) == false) { + deletes.add(s); + } + }); + upgradedMap.forEach((key, value) -> { + if (value.equals(existingMap.get(key)) == false) { + changes.put(key, toBytesReference(value)); + } + }); + return Optional.of(new Tuple<>(changes, deletes)); + } + return Optional.empty(); + } + + private static final ToXContent.Params PARAMS = new ToXContent.MapParams(singletonMap("reduce_mappings", "true")); + + private BytesReference toBytesReference(IndexTemplateMetaData templateMetaData) { + try { + return XContentHelper.toXContent((builder, params) -> { + IndexTemplateMetaData.Builder.toInnerXContent(templateMetaData, builder, params); + return builder; + }, XContentType.JSON, PARAMS, false); + } catch (IOException ex) { + throw new IllegalStateException("Cannot serialize template [" + templateMetaData.getName() + "]", ex); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index c38f556a0a8..2ed88aa1127 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -56,13 +56,14 @@ public class DiscoveryNodes extends AbstractDiffable implements private final String masterNodeId; private final String localNodeId; private final Version minNonClientNodeVersion; + private final Version maxNonClientNodeVersion; private final Version maxNodeVersion; private final Version minNodeVersion; private DiscoveryNodes(ImmutableOpenMap nodes, ImmutableOpenMap dataNodes, ImmutableOpenMap masterNodes, ImmutableOpenMap ingestNodes, - String masterNodeId, String localNodeId, Version minNonClientNodeVersion, Version maxNodeVersion, - Version minNodeVersion) { + String masterNodeId, String localNodeId, Version minNonClientNodeVersion, Version maxNonClientNodeVersion, + Version maxNodeVersion, Version minNodeVersion) { this.nodes = nodes; this.dataNodes = dataNodes; this.masterNodes = masterNodes; @@ -70,6 +71,7 @@ public class DiscoveryNodes extends 
AbstractDiffable implements this.masterNodeId = masterNodeId; this.localNodeId = localNodeId; this.minNonClientNodeVersion = minNonClientNodeVersion; + this.maxNonClientNodeVersion = maxNonClientNodeVersion; this.minNodeVersion = minNodeVersion; this.maxNodeVersion = maxNodeVersion; } @@ -234,12 +236,25 @@ public class DiscoveryNodes extends AbstractDiffable implements /** * Returns the version of the node with the oldest version in the cluster that is not a client node * + * If there are no non-client nodes, Version.CURRENT will be returned. + * * @return the oldest version in the cluster */ public Version getSmallestNonClientNodeVersion() { return minNonClientNodeVersion; } + /** + * Returns the version of the node with the youngest version in the cluster that is not a client node. + * + * If there are no non-client nodes, Version.CURRENT will be returned. + * + * @return the youngest version in the cluster + */ + public Version getLargestNonClientNodeVersion() { + return maxNonClientNodeVersion; + } + /** * Returns the version of the node with the oldest version in the cluster. * @@ -252,7 +267,7 @@ public class DiscoveryNodes extends AbstractDiffable implements /** * Returns the version of the node with the youngest version in the cluster * - * @return the oldest version in the cluster + * @return the youngest version in the cluster */ public Version getMaxNodeVersion() { return maxNodeVersion; @@ -654,15 +669,25 @@ public class DiscoveryNodes extends AbstractDiffable implements ImmutableOpenMap.Builder ingestNodesBuilder = ImmutableOpenMap.builder(); Version minNodeVersion = Version.CURRENT; Version maxNodeVersion = Version.CURRENT; - Version minNonClientNodeVersion = Version.CURRENT; + // The node where we are building this on might not be a master or a data node, so we cannot assume + // that there is a node with the current version as a part of the cluster. + Version minNonClientNodeVersion = null; + Version maxNonClientNodeVersion = null; for (ObjectObjectCursor nodeEntry : nodes) { if (nodeEntry.value.isDataNode()) { dataNodesBuilder.put(nodeEntry.key, nodeEntry.value); - minNonClientNodeVersion = Version.min(minNonClientNodeVersion, nodeEntry.value.getVersion()); } if (nodeEntry.value.isMasterNode()) { masterNodesBuilder.put(nodeEntry.key, nodeEntry.value); - minNonClientNodeVersion = Version.min(minNonClientNodeVersion, nodeEntry.value.getVersion()); + } + if (nodeEntry.value.isDataNode() || nodeEntry.value.isMasterNode()) { + if (minNonClientNodeVersion == null) { + minNonClientNodeVersion = nodeEntry.value.getVersion(); + maxNonClientNodeVersion = nodeEntry.value.getVersion(); + } else { + minNonClientNodeVersion = Version.min(minNonClientNodeVersion, nodeEntry.value.getVersion()); + maxNonClientNodeVersion = Version.max(maxNonClientNodeVersion, nodeEntry.value.getVersion()); + } } if (nodeEntry.value.isIngestNode()) { ingestNodesBuilder.put(nodeEntry.key, nodeEntry.value); @@ -673,7 +698,8 @@ public class DiscoveryNodes extends AbstractDiffable implements return new DiscoveryNodes( nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), ingestNodesBuilder.build(), - masterNodeId, localNodeId, minNonClientNodeVersion, maxNodeVersion, minNodeVersion + masterNodeId, localNodeId, minNonClientNodeVersion == null ? Version.CURRENT : minNonClientNodeVersion, + maxNonClientNodeVersion == null ? 
Version.CURRENT : maxNonClientNodeVersion, maxNodeVersion, minNodeVersion ); } diff --git a/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java b/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java index 67c8d508ba5..9813a7ab0f9 100644 --- a/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/cache/CacheBuilder.java @@ -91,7 +91,7 @@ public class CacheBuilder { } public Cache build() { - Cache cache = new Cache(); + Cache cache = new Cache<>(); if (maximumWeight != -1) { cache.setMaximumWeight(maximumWeight); } diff --git a/core/src/main/java/org/elasticsearch/index/get/GetField.java b/core/src/main/java/org/elasticsearch/common/document/DocumentField.java similarity index 70% rename from core/src/main/java/org/elasticsearch/index/get/GetField.java rename to core/src/main/java/org/elasticsearch/common/document/DocumentField.java index 928988e3d3d..e1cbec8b477 100644 --- a/core/src/main/java/org/elasticsearch/index/get/GetField.java +++ b/core/src/main/java/org/elasticsearch/common/document/DocumentField.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.get; +package org.elasticsearch.common.document; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -25,7 +25,9 @@ import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.search.SearchHit; import java.io.IOException; import java.util.ArrayList; @@ -36,34 +38,52 @@ import java.util.Objects; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.common.xcontent.XContentParserUtils.parseStoredFieldsValue; -public class GetField implements Streamable, ToXContent, Iterable { +/** + * A single field name and values part of {@link SearchHit} and {@link GetResult}. + * + * @see SearchHit + * @see GetResult + */ +public class DocumentField implements Streamable, ToXContent, Iterable { private String name; private List values; - private GetField() { + private DocumentField() { } - public GetField(String name, List values) { + public DocumentField(String name, List values) { this.name = Objects.requireNonNull(name, "name must not be null"); this.values = Objects.requireNonNull(values, "values must not be null"); } + /** + * The name of the field. + */ public String getName() { return name; } - public Object getValue() { - if (values != null && !values.isEmpty()) { - return values.get(0); + /** + * The first value of the hit. + */ + public V getValue() { + if (values == null || values.isEmpty()) { + return null; } - return null; + return (V)values.get(0); } + /** + * The field values. 
+ */ public List getValues() { return values; } + /** + * @return The field is a metadata field + */ public boolean isMetadataField() { return MapperService.isMetadataField(name); } @@ -73,8 +93,8 @@ public class GetField implements Streamable, ToXContent, Iterable { return values.iterator(); } - public static GetField readGetField(StreamInput in) throws IOException { - GetField result = new GetField(); + public static DocumentField readDocumentField(StreamInput in) throws IOException { + DocumentField result = new DocumentField(); result.readFrom(in); return result; } @@ -102,25 +122,26 @@ public class GetField implements Streamable, ToXContent, Iterable { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startArray(name); for (Object value : values) { - //this call doesn't really need to support writing any kind of object. - //Stored fields values are converted using MappedFieldType#valueForDisplay. - //As a result they can either be Strings, Numbers, Booleans, or BytesReference, that's all. + // this call doesn't really need to support writing any kind of object. + // Stored fields values are converted using MappedFieldType#valueForDisplay. + // As a result they can either be Strings, Numbers, Booleans, or BytesReference, that's + // all. builder.value(value); } builder.endArray(); return builder; } - public static GetField fromXContent(XContentParser parser) throws IOException { + public static DocumentField fromXContent(XContentParser parser) throws IOException { ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation); String fieldName = parser.currentName(); XContentParser.Token token = parser.nextToken(); ensureExpectedToken(XContentParser.Token.START_ARRAY, token, parser::getTokenLocation); List values = new ArrayList<>(); - while((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { values.add(parseStoredFieldsValue(parser)); } - return new GetField(fieldName, values); + return new DocumentField(fieldName, values); } @Override @@ -131,9 +152,8 @@ public class GetField implements Streamable, ToXContent, Iterable { if (o == null || getClass() != o.getClass()) { return false; } - GetField objects = (GetField) o; - return Objects.equals(name, objects.name) && - Objects.equals(values, objects.values); + DocumentField objects = (DocumentField) o; + return Objects.equals(name, objects.name) && Objects.equals(values, objects.values); } @Override @@ -143,9 +163,9 @@ public class GetField implements Streamable, ToXContent, Iterable { @Override public String toString() { - return "GetField{" + + return "DocumentField{" + "name='" + name + '\'' + ", values=" + values + '}'; } -} +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 52550f1ba67..10adf530b1e 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -272,7 +272,7 @@ public class Lucene { public static TopDocs readTopDocs(StreamInput in) throws IOException { byte type = in.readByte(); if (type == 0) { - int totalHits = in.readVInt(); + long totalHits = in.readVLong(); float maxScore = in.readFloat(); ScoreDoc[] scoreDocs = new ScoreDoc[in.readVInt()]; @@ -281,7 +281,7 @@ public class Lucene { } return new TopDocs(totalHits, scoreDocs, 
maxScore); } else if (type == 1) { - int totalHits = in.readVInt(); + long totalHits = in.readVLong(); float maxScore = in.readFloat(); SortField[] fields = new SortField[in.readVInt()]; @@ -385,7 +385,7 @@ public class Lucene { out.writeByte((byte) 2); CollapseTopFieldDocs collapseDocs = (CollapseTopFieldDocs) topDocs; - out.writeVInt(topDocs.totalHits); + out.writeVLong(topDocs.totalHits); out.writeFloat(topDocs.getMaxScore()); out.writeString(collapseDocs.field); @@ -405,7 +405,7 @@ public class Lucene { out.writeByte((byte) 1); TopFieldDocs topFieldDocs = (TopFieldDocs) topDocs; - out.writeVInt(topDocs.totalHits); + out.writeVLong(topDocs.totalHits); out.writeFloat(topDocs.getMaxScore()); out.writeVInt(topFieldDocs.fields.length); @@ -419,7 +419,7 @@ public class Lucene { } } else { out.writeByte((byte) 0); - out.writeVInt(topDocs.totalHits); + out.writeVLong(topDocs.totalHits); out.writeFloat(topDocs.getMaxScore()); out.writeVInt(topDocs.scoreDocs.length); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index 112bf271c4e..1113ab9cd93 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -119,7 +119,7 @@ public class ScriptScoreFunction extends ScoreFunction { @Override public boolean needsScores() { - return script.needsScores(); + return script.needs_score(); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index e8b47783afb..2b37c338c9a 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.lucene.uid; * under the License. */ -import org.apache.lucene.index.Fields; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -43,8 +42,11 @@ import java.io.IOException; * not thread safe, so it is the caller's job to create and use one * instance of this per thread. Do not use this if a term may appear * in more than one document! It will only return the first one it - * finds. */ - + * finds. + * This class uses live docs, so it should be cached based on the + * {@link org.apache.lucene.index.IndexReader#getReaderCacheHelper() reader cache helper} + * rather than the {@link LeafReader#getCoreCacheHelper() core cache helper}. + */ final class PerThreadIDVersionAndSeqNoLookup { // TODO: do we really need to store all this stuff? some if it might not speed up anything. // we keep it around for now, to reduce the amount of e.g. 
hash lookups by field and stuff @@ -64,8 +66,7 @@ final class PerThreadIDVersionAndSeqNoLookup { */ PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField) throws IOException { this.uidField = uidField; - Fields fields = reader.fields(); - Terms terms = fields.terms(uidField); + Terms terms = reader.terms(uidField); if (terms == null) { throw new IllegalArgumentException("reader misses the [" + uidField + "] field"); } @@ -79,12 +80,17 @@ final class PerThreadIDVersionAndSeqNoLookup { this.readerKey = readerKey; } - /** Return null if id is not found. */ - public DocIdAndVersion lookupVersion(BytesRef id, Bits liveDocs, LeafReaderContext context) + /** Return null if id is not found. + * We pass the {@link LeafReaderContext} as an argument so that things + * still work with reader wrappers that hide some documents while still + * using the same cache key. Otherwise we'd have to disable caching + * entirely for these readers. + */ + public DocIdAndVersion lookupVersion(BytesRef id, LeafReaderContext context) throws IOException { assert context.reader().getCoreCacheHelper().getKey().equals(readerKey) : "context's reader is not the same as the reader class was initialized on."; - int docID = getDocID(id, liveDocs); + int docID = getDocID(id, context.reader().getLiveDocs()); if (docID != DocIdSetIterator.NO_MORE_DOCS) { final NumericDocValues versions = context.reader().getNumericDocValues(VersionFieldMapper.NAME); @@ -122,10 +128,10 @@ final class PerThreadIDVersionAndSeqNoLookup { } /** Return null if id is not found. */ - DocIdAndSeqNo lookupSeqNo(BytesRef id, Bits liveDocs, LeafReaderContext context) throws IOException { + DocIdAndSeqNo lookupSeqNo(BytesRef id, LeafReaderContext context) throws IOException { assert context.reader().getCoreCacheHelper().getKey().equals(readerKey) : "context's reader is not the same as the reader class was initialized on."; - int docID = getDocID(id, liveDocs); + int docID = getDocID(id, context.reader().getLiveDocs()); if (docID != DocIdSetIterator.NO_MORE_DOCS) { NumericDocValues seqNos = context.reader().getNumericDocValues(SeqNoFieldMapper.NAME); long seqNo; @@ -139,18 +145,4 @@ final class PerThreadIDVersionAndSeqNoLookup { return null; } } - - /** - * returns 0 if the primary term is not found. - * - * Note that 0 is an illegal primary term. 
See {@link org.elasticsearch.cluster.metadata.IndexMetaData#primaryTerm(int)} - **/ - long lookUpPrimaryTerm(int docID, LeafReader reader) throws IOException { - NumericDocValues primaryTerms = reader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); - if (primaryTerms != null && primaryTerms.advanceExact(docID)) { - return primaryTerms.longValue(); - } else { - return 0; - } - } } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java b/core/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java index 3cdbfa38b62..126e4dee51c 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java @@ -20,11 +20,12 @@ package org.elasticsearch.common.lucene.uid; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.Term; import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; import java.io.IOException; import java.util.List; @@ -36,26 +37,31 @@ import static org.elasticsearch.common.lucene.uid.Versions.NOT_FOUND; /** Utility class to resolve the Lucene doc ID, version, seqNo and primaryTerms for a given uid. */ public final class VersionsAndSeqNoResolver { - static final ConcurrentMap> lookupStates = + static final ConcurrentMap> lookupStates = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); // Evict this reader from lookupStates once it's closed: private static final IndexReader.ClosedListener removeLookupState = key -> { - CloseableThreadLocal ctl = lookupStates.remove(key); + CloseableThreadLocal ctl = lookupStates.remove(key); if (ctl != null) { ctl.close(); } }; - private static PerThreadIDVersionAndSeqNoLookup getLookupState(LeafReader reader, String uidField) throws IOException { - IndexReader.CacheHelper cacheHelper = reader.getCoreCacheHelper(); - CloseableThreadLocal ctl = lookupStates.get(cacheHelper.getKey()); + private static PerThreadIDVersionAndSeqNoLookup[] getLookupState(IndexReader reader, String uidField) throws IOException { + // We cache on the top level + // This means cache entries have a shorter lifetime, maybe as low as 1s with the + // default refresh interval and a steady indexing rate, but on the other hand it + // proved to be cheaper than having to perform a CHM and a TL get for every segment. + // See https://github.com/elastic/elasticsearch/pull/19856. 
+ IndexReader.CacheHelper cacheHelper = reader.getReaderCacheHelper(); + CloseableThreadLocal ctl = lookupStates.get(cacheHelper.getKey()); if (ctl == null) { // First time we are seeing this reader's core; make a new CTL: ctl = new CloseableThreadLocal<>(); - CloseableThreadLocal other = lookupStates.putIfAbsent(cacheHelper.getKey(), ctl); + CloseableThreadLocal other = lookupStates.putIfAbsent(cacheHelper.getKey(), ctl); if (other == null) { - // Our CTL won, we must remove it when the core is closed: + // Our CTL won, we must remove it when the reader is closed: cacheHelper.addClosedListener(removeLookupState); } else { // Another thread beat us to it: just use their CTL: @@ -63,13 +69,22 @@ public final class VersionsAndSeqNoResolver { } } - PerThreadIDVersionAndSeqNoLookup lookupState = ctl.get(); + PerThreadIDVersionAndSeqNoLookup[] lookupState = ctl.get(); if (lookupState == null) { - lookupState = new PerThreadIDVersionAndSeqNoLookup(reader, uidField); + lookupState = new PerThreadIDVersionAndSeqNoLookup[reader.leaves().size()]; + for (LeafReaderContext leaf : reader.leaves()) { + lookupState[leaf.ord] = new PerThreadIDVersionAndSeqNoLookup(leaf.reader(), uidField); + } ctl.set(lookupState); - } else if (Objects.equals(lookupState.uidField, uidField) == false) { + } + + if (lookupState.length != reader.leaves().size()) { + throw new AssertionError("Mismatched numbers of leaves: " + lookupState.length + " != " + reader.leaves().size()); + } + + if (lookupState.length > 0 && Objects.equals(lookupState[0].uidField, uidField) == false) { throw new AssertionError("Index does not consistently use the same uid field: [" - + uidField + "] != [" + lookupState.uidField + "]"); + + uidField + "] != [" + lookupState[0].uidField + "]"); } return lookupState; @@ -112,17 +127,14 @@ public final class VersionsAndSeqNoResolver { * */ public static DocIdAndVersion loadDocIdAndVersion(IndexReader reader, Term term) throws IOException { + PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field()); List leaves = reader.leaves(); - if (leaves.isEmpty()) { - return null; - } // iterate backwards to optimize for the frequently updated documents // which are likely to be in the last segments for (int i = leaves.size() - 1; i >= 0; i--) { - LeafReaderContext context = leaves.get(i); - LeafReader leaf = context.reader(); - PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf, term.field()); - DocIdAndVersion result = lookup.lookupVersion(term.bytes(), leaf.getLiveDocs(), context); + final LeafReaderContext leaf = leaves.get(i); + PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord]; + DocIdAndVersion result = lookup.lookupVersion(term.bytes(), leaf); if (result != null) { return result; } @@ -137,17 +149,14 @@ public final class VersionsAndSeqNoResolver { * */ public static DocIdAndSeqNo loadDocIdAndSeqNo(IndexReader reader, Term term) throws IOException { + PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field()); List leaves = reader.leaves(); - if (leaves.isEmpty()) { - return null; - } // iterate backwards to optimize for the frequently updated documents // which are likely to be in the last segments for (int i = leaves.size() - 1; i >= 0; i--) { - LeafReaderContext context = leaves.get(i); - LeafReader leaf = context.reader(); - PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf, term.field()); - DocIdAndSeqNo result = lookup.lookupSeqNo(term.bytes(), leaf.getLiveDocs(), context); + final LeafReaderContext leaf = leaves.get(i); + 
PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord]; + DocIdAndSeqNo result = lookup.lookupSeqNo(term.bytes(), leaf); if (result != null) { return result; } @@ -159,9 +168,13 @@ public final class VersionsAndSeqNoResolver { * Load the primaryTerm associated with the given {@link DocIdAndSeqNo} */ public static long loadPrimaryTerm(DocIdAndSeqNo docIdAndSeqNo, String uidField) throws IOException { - LeafReader leaf = docIdAndSeqNo.context.reader(); - PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf, uidField); - long result = lookup.lookUpPrimaryTerm(docIdAndSeqNo.docId, leaf); + NumericDocValues primaryTerms = docIdAndSeqNo.context.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); + long result; + if (primaryTerms != null && primaryTerms.advanceExact(docIdAndSeqNo.docId)) { + result = primaryTerms.longValue(); + } else { + result = 0; + } assert result > 0 : "should always resolve a primary term for a resolved sequence number. primary_term [" + result + "]" + " docId [" + docIdAndSeqNo.docId + "] seqNo [" + docIdAndSeqNo.seqNo + "]"; return result; diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index d8ee93fe882..15dffc427e7 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -314,12 +314,8 @@ public final class ClusterSettings extends AbstractScopedSettings { HunspellService.HUNSPELL_IGNORE_CASE, HunspellService.HUNSPELL_DICTIONARY_OPTIONS, IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, - Environment.DEFAULT_PATH_CONF_SETTING, - Environment.PATH_CONF_SETTING, - Environment.DEFAULT_PATH_DATA_SETTING, Environment.PATH_DATA_SETTING, Environment.PATH_HOME_SETTING, - Environment.DEFAULT_PATH_LOGS_SETTING, Environment.PATH_LOGS_SETTING, Environment.PATH_REPO_SETTING, Environment.PATH_SHARED_DATA_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 9fcafcea3b2..890a43107c5 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -77,6 +77,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING, IndexMetaData.INDEX_PRIORITY_SETTING, IndexMetaData.INDEX_DATA_PATH_SETTING, + IndexMetaData.INDEX_FORMAT_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING, @@ -127,6 +128,8 @@ public final class IndexScopedSettings extends AbstractScopedSettings { EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING, IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING, + IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING, + IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING, IndexFieldDataService.INDEX_FIELDDATA_CACHE_KEY, FieldMapper.IGNORE_MALFORMED_SETTING, FieldMapper.COERCE_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java index c2345f2ddd8..16818341cbd 100644 --- 
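[Editor's note] loadPrimaryTerm above now reads the primary term straight from a numeric doc-values field on the leaf that resolved the sequence number, instead of going through the per-thread lookup. A small standalone sketch of that iterator-style doc-values read; the field name and fallback value are placeholders:

```java
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;

import java.io.IOException;

final class DocValuesReadSketch {
    /** Returns the long doc value of {@code field} for {@code docId}, or {@code missing} when absent. */
    static long readLong(LeafReader leaf, String field, int docId, long missing) throws IOException {
        NumericDocValues values = leaf.getNumericDocValues(field);
        if (values != null && values.advanceExact(docId)) {
            return values.longValue();
        }
        return missing;
    }
}
```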
a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java +++ b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java @@ -32,7 +32,7 @@ public class KeyStoreCli extends MultiCommand { subcommands.put("create", new CreateKeyStoreCommand()); subcommands.put("list", new ListKeyStoreCommand()); subcommands.put("add", new AddStringKeyStoreCommand()); - subcommands.put("add-file", new AddStringKeyStoreCommand()); + subcommands.put("add-file", new AddFileKeyStoreCommand()); subcommands.put("remove", new RemoveSettingKeyStoreCommand()); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java b/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java index c5a364f5473..98f980c1ec6 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.settings; import java.io.Closeable; +import java.io.IOException; import java.io.InputStream; import java.security.GeneralSecurityException; import java.util.Set; @@ -40,4 +41,7 @@ public interface SecureSettings extends Closeable { /** Return a file setting. The {@link InputStream} should be closed once it is used. */ InputStream getFile(String setting) throws GeneralSecurityException; + + @Override + void close() throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index d81204cfb21..241315144a9 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -345,7 +345,7 @@ public class Setting extends ToXContentToBytes { /** Logs a deprecation warning if the setting is deprecated and used. */ protected void checkDeprecation(Settings settings) { // They're using the setting, so we need to tell them to stop - if (this.isDeprecated() && this.exists(settings)) { + if (this.isDeprecated() && this.exists(settings) && settings.addDeprecatedSetting(this)) { // It would be convenient to show its replacement key, but replacement is often not so simple final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(getClass())); deprecationLogger.deprecated("[{}] setting was deprecated in Elasticsearch and will be removed in a future release! " + diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index f71ddccd9d3..8412f57fd89 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -55,7 +55,6 @@ import java.util.Dictionary; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -63,6 +62,7 @@ import java.util.NoSuchElementException; import java.util.Objects; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Predicate; @@ -93,6 +93,22 @@ public final class Settings implements ToXContent { /** The first level of setting names. This is constructed lazily in {@link #names()}. 
*/ private final SetOnce> firstLevelNames = new SetOnce<>(); + /** + * The set of deprecated settings tracked by this settings object. + */ + private final Set deprecatedSettings = Collections.newSetFromMap(new ConcurrentHashMap<>()); + + /** + * Add the setting as a tracked deprecated setting. + * + * @param setting the deprecated setting to track + * @return true if the setting was not already tracked as a deprecated setting, otherwise false + */ + boolean addDeprecatedSetting(final Setting setting) { + assert setting.isDeprecated() && setting.exists(this) : setting.getKey(); + return deprecatedSettings.add(setting.getKey()); + } + /** * Setting names found in this Settings for both string and secure settings. * This is constructed lazily in {@link #keySet()}. @@ -610,8 +626,10 @@ public final class Settings implements ToXContent { } public static void writeSettingsToStream(Settings settings, StreamOutput out) throws IOException { - out.writeVInt(settings.size()); - for (Map.Entry entry : settings.getAsMap().entrySet()) { + // pull getAsMap() to exclude secure settings in size() + Set> entries = settings.getAsMap().entrySet(); + out.writeVInt(entries.size()); + for (Map.Entry entry : entries) { out.writeString(entry.getKey()); out.writeOptionalString(entry.getValue()); } @@ -716,6 +734,10 @@ public final class Settings implements ToXContent { if (secureSettings.isLoaded() == false) { throw new IllegalStateException("Secure settings must already be loaded"); } + if (this.secureSettings.get() != null) { + throw new IllegalArgumentException("Secure settings already set. Existing settings: " + + this.secureSettings.get().getSettingNames() + ", new settings: " + secureSettings.getSettingNames()); + } this.secureSettings.set(secureSettings); return this; } diff --git a/core/src/main/java/org/elasticsearch/common/util/set/Sets.java b/core/src/main/java/org/elasticsearch/common/util/set/Sets.java index f2bba5cde36..0f1fe22c020 100644 --- a/core/src/main/java/org/elasticsearch/common/util/set/Sets.java +++ b/core/src/main/java/org/elasticsearch/common/util/set/Sets.java @@ -71,12 +71,31 @@ public final class Sets { return !left.stream().anyMatch(k -> right.contains(k)); } + /** + * The relative complement, or difference, of the specified left and right set. Namely, the resulting set contains all the elements that + * are in the left set but not in the right set. Neither input is mutated by this operation, an entirely new set is returned. + * + * @param left the left set + * @param right the right set + * @param the type of the elements of the sets + * @return the relative complement of the left set with respect to the right set + */ public static Set difference(Set left, Set right) { Objects.requireNonNull(left); Objects.requireNonNull(right); return left.stream().filter(k -> !right.contains(k)).collect(Collectors.toSet()); } + /** + * The relative complement, or difference, of the specified left and right set, returned as a sorted set. Namely, the resulting set + * contains all the elements that are in the left set but not in the right set, and the set is sorted using the natural ordering of + * element type. Neither input is mutated by this operation, an entirely new set is returned. 
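[Editor's note] Settings#addDeprecatedSetting above relies on a concurrent set so that a given deprecated setting key triggers at most one warning per Settings instance, even under concurrent access. The underlying idiom, as a standalone sketch:

```java
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

final class WarnOnceSketch {
    // Thread-safe set view over a ConcurrentHashMap; Set#add returns true only for the first caller.
    private final Set<String> seenKeys = Collections.newSetFromMap(new ConcurrentHashMap<>());

    /** Returns true exactly once per key, so callers can gate a one-time deprecation warning on it. */
    boolean shouldWarn(String settingKey) {
        return seenKeys.add(settingKey);
    }
}
```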
+ * + * @param left the left set + * @param right the right set + * @param the type of the elements of the sets + * @return the sorted relative complement of the left set with respect to the right set + */ public static SortedSet sortedDifference(Set left, Set right) { Objects.requireNonNull(left); Objects.requireNonNull(right); diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java index 30199afa98c..e28b44b42c5 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentParserUtils.java @@ -23,10 +23,10 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.XContentParser.Token; -import org.elasticsearch.rest.action.search.RestSearchAction; import java.io.IOException; import java.util.Locale; +import java.util.function.Consumer; import java.util.function.Supplier; /** @@ -115,29 +115,39 @@ public final class XContentParserUtils { * (ex: terms#foo where "terms" refers to the type of a registered {@link NamedXContentRegistry.Entry}, * "#" is the delimiter and "foo" the name of the object to parse). * + * It also expected that following this field name is either an Object or an array xContent structure and + * the cursor points to the start token of this structure. + * * The method splits the field's name to extract the type and name and then parses the object * using the {@link XContentParser#namedObject(Class, String, Object)} method. * * @param parser the current {@link XContentParser} * @param delimiter the delimiter to use to splits the field's name * @param objectClass the object class of the object to parse + * @param consumer something to consume the parsed object * @param the type of the object to parse - * @return the parsed object * @throws IOException if anything went wrong during parsing or if the type or name cannot be derived * from the field's name + * @throws ParsingException if the parser isn't positioned on either START_OBJECT or START_ARRAY at the beginning */ - public static T parseTypedKeysObject(XContentParser parser, String delimiter, Class objectClass) throws IOException { + public static void parseTypedKeysObject(XContentParser parser, String delimiter, Class objectClass, Consumer consumer) + throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT && parser.currentToken() != XContentParser.Token.START_ARRAY) { + throwUnknownToken(parser.currentToken(), parser.getTokenLocation()); + } String currentFieldName = parser.currentName(); if (Strings.hasLength(currentFieldName)) { int position = currentFieldName.indexOf(delimiter); if (position > 0) { String type = currentFieldName.substring(0, position); String name = currentFieldName.substring(position + 1); - return parser.namedObject(objectClass, type, name); + consumer.accept(parser.namedObject(objectClass, type, name)); + return; } + // if we didn't find a delimiter we ignore the object or array for forward compatibility instead of throwing an error + parser.skipChildren(); + } else { + throw new ParsingException(parser.getTokenLocation(), "Failed to parse object: empty key"); } - throw new ParsingException(parser.getTokenLocation(), "Cannot parse object of class [" + objectClass.getSimpleName() - + "] without type information. 
Set [" + RestSearchAction.TYPED_KEYS_PARAM + "] parameter on the request to ensure the" - + " type information is added to the response output"); } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java b/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java index 36eacb81f83..6e9b53a7361 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java @@ -97,7 +97,7 @@ public class XContentMapValues { } } - public static Object extractValue(String path, Map map) { + public static Object extractValue(String path, Map map) { String[] pathElements = path.split("\\."); if (pathElements.length == 0) { return null; diff --git a/core/src/main/java/org/elasticsearch/env/Environment.java b/core/src/main/java/org/elasticsearch/env/Environment.java index ce2b15d2d71..8f386f79dcf 100644 --- a/core/src/main/java/org/elasticsearch/env/Environment.java +++ b/core/src/main/java/org/elasticsearch/env/Environment.java @@ -46,16 +46,10 @@ import java.util.function.Function; // public+forbidden api! public class Environment { public static final Setting PATH_HOME_SETTING = Setting.simpleString("path.home", Property.NodeScope); - public static final Setting DEFAULT_PATH_CONF_SETTING = Setting.simpleString("default.path.conf", Property.NodeScope); - public static final Setting PATH_CONF_SETTING = - new Setting<>("path.conf", DEFAULT_PATH_CONF_SETTING, Function.identity(), Property.NodeScope); - public static final Setting> DEFAULT_PATH_DATA_SETTING = - Setting.listSetting("default.path.data", Collections.emptyList(), Function.identity(), Property.NodeScope); public static final Setting> PATH_DATA_SETTING = - Setting.listSetting("path.data", DEFAULT_PATH_DATA_SETTING, Function.identity(), Property.NodeScope); - public static final Setting DEFAULT_PATH_LOGS_SETTING = Setting.simpleString("default.path.logs", Property.NodeScope); + Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), Property.NodeScope); public static final Setting PATH_LOGS_SETTING = - new Setting<>("path.logs", DEFAULT_PATH_LOGS_SETTING, Function.identity(), Property.NodeScope); + new Setting<>("path.logs", "", Function.identity(), Property.NodeScope); public static final Setting> PATH_REPO_SETTING = Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), Property.NodeScope); public static final Setting PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", Property.NodeScope); @@ -92,6 +86,10 @@ public class Environment { private final Path tmpFile = PathUtils.get(System.getProperty("java.io.tmpdir")); public Environment(Settings settings) { + this(settings, null); + } + + public Environment(final Settings settings, final Path configPath) { final Path homeFile; if (PATH_HOME_SETTING.exists(settings)) { homeFile = PathUtils.get(PATH_HOME_SETTING.get(settings)).normalize(); @@ -99,9 +97,8 @@ public class Environment { throw new IllegalStateException(PATH_HOME_SETTING.getKey() + " is not configured"); } - // this is trappy, Setting#get(Settings) will get a fallback setting yet return false for Settings#exists(Settings) - if (PATH_CONF_SETTING.exists(settings) || DEFAULT_PATH_CONF_SETTING.exists(settings)) { - configFile = PathUtils.get(PATH_CONF_SETTING.get(settings)).normalize(); + if (configPath != null) { + configFile = configPath.normalize(); } else { configFile = 
homeFile.resolve("config"); } @@ -137,7 +134,7 @@ public class Environment { } // this is trappy, Setting#get(Settings) will get a fallback setting yet return false for Settings#exists(Settings) - if (PATH_LOGS_SETTING.exists(settings) || DEFAULT_PATH_LOGS_SETTING.exists(settings)) { + if (PATH_LOGS_SETTING.exists(settings)) { logsFile = PathUtils.get(PATH_LOGS_SETTING.get(settings)).normalize(); } else { logsFile = homeFile.resolve("logs"); @@ -160,7 +157,6 @@ public class Environment { } finalSettings.put(PATH_LOGS_SETTING.getKey(), logsFile); this.settings = finalSettings.build(); - } /** diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index 2764ffd38cc..537344ca653 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -37,6 +37,7 @@ import org.elasticsearch.node.Node; import java.util.Locale; import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; import java.util.function.Function; /** @@ -111,6 +112,24 @@ public final class IndexSettings { Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), Property.Dynamic, Property.IndexScope); + /** + * Controls how long translog files that are no longer needed for persistence reasons + * will be kept around before being deleted. A longer retention policy is useful to increase + * the chance of ops based recoveries. + **/ + public static final Setting INDEX_TRANSLOG_RETENTION_AGE_SETTING = + Setting.timeSetting("index.translog.retention.age", TimeValue.timeValueHours(12), TimeValue.timeValueMillis(-1), Property.Dynamic, + Property.IndexScope); + + /** + * Controls how many translog files that are no longer needed for persistence reasons + * will be kept around before being deleted. Keeping more files is useful to increase + * the chance of ops based recoveries. + **/ + public static final Setting INDEX_TRANSLOG_RETENTION_SIZE_SETTING = + Setting.byteSizeSetting("index.translog.retention.size", new ByteSizeValue(512, ByteSizeUnit.MB), Property.Dynamic, + Property.IndexScope); + /** * The maximum size of a translog generation. This is independent of the maximum size of * translog operations that have not been flushed. 
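[Editor's note] The two new dynamic index settings defined above bound how long and how much translog is retained beyond what persistence requires, to increase the chance of operations-based recoveries. A hypothetical usage sketch with the Settings builder from this codebase; the values shown are simply the defaults from the definitions above:

```java
import org.elasticsearch.common.settings.Settings;

public class TranslogRetentionSettingsExample {
    public static void main(String[] args) {
        // Both settings are dynamic, so they can also be changed on a live index
        // through an update-settings request.
        Settings indexSettings = Settings.builder()
                .put("index.translog.retention.age", "12h")    // maximum age of retained translog files
                .put("index.translog.retention.size", "512mb") // total size budget for retained translog
                .build();
        System.out.println(indexSettings.getAsMap());
    }
}
```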
@@ -168,6 +187,8 @@ public final class IndexSettings { private final TimeValue syncInterval; private volatile TimeValue refreshInterval; private volatile ByteSizeValue flushThresholdSize; + private volatile TimeValue translogRetentionAge; + private volatile ByteSizeValue translogRetentionSize; private volatile ByteSizeValue generationThresholdSize; private final MergeSchedulerConfig mergeSchedulerConfig; private final MergePolicyConfig mergePolicyConfig; @@ -265,6 +286,8 @@ public final class IndexSettings { syncInterval = INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.get(settings); refreshInterval = scopedSettings.get(INDEX_REFRESH_INTERVAL_SETTING); flushThresholdSize = scopedSettings.get(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING); + translogRetentionAge = scopedSettings.get(INDEX_TRANSLOG_RETENTION_AGE_SETTING); + translogRetentionSize = scopedSettings.get(INDEX_TRANSLOG_RETENTION_SIZE_SETTING); generationThresholdSize = scopedSettings.get(INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING); mergeSchedulerConfig = new MergeSchedulerConfig(this); gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); @@ -302,6 +325,8 @@ public final class IndexSettings { scopedSettings.addSettingsUpdateConsumer( INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING, this::setGenerationThresholdSize); + scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_RETENTION_AGE_SETTING, this::setTranslogRetentionAge); + scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_RETENTION_SIZE_SETTING, this::setTranslogRetentionSize); scopedSettings.addSettingsUpdateConsumer(INDEX_REFRESH_INTERVAL_SETTING, this::setRefreshInterval); scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners); scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll); @@ -311,6 +336,14 @@ public final class IndexSettings { this.flushThresholdSize = byteSizeValue; } + private void setTranslogRetentionSize(ByteSizeValue byteSizeValue) { + this.translogRetentionSize = byteSizeValue; + } + + private void setTranslogRetentionAge(TimeValue age) { + this.translogRetentionAge = age; + } + private void setGenerationThresholdSize(final ByteSizeValue generationThresholdSize) { this.generationThresholdSize = generationThresholdSize; } @@ -469,6 +502,16 @@ public final class IndexSettings { */ public ByteSizeValue getFlushThresholdSize() { return flushThresholdSize; } + /** + * Returns the transaction log retention size which controls how much of the translog is kept around to allow for ops based recoveries + */ + public ByteSizeValue getTranslogRetentionSize() { return translogRetentionSize; } + + /** + * Returns the transaction log retention age which controls the maximum age (time from creation) that translog files will be kept around + */ + public TimeValue getTranslogRetentionAge() { return translogRetentionAge; } + /** * Returns the generation threshold size. 
As sequence numbers can cause multiple generations to * be preserved for rollback purposes, we want to keep the size of individual generations from diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 4c17773d6df..e8134244f04 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -318,12 +318,12 @@ public final class AnalysisRegistry implements Closeable { T factory = null; if (typeName == null) { if (currentSettings.get("tokenizer") != null) { - factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings); + factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings, environment); } else { throw new IllegalArgumentException(component + " [" + name + "] must specify either an analyzer type, or a tokenizer"); } } else if (typeName.equals("custom")) { - factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings); + factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings, environment); } if (factory != null) { factories.put(name, factory); @@ -466,7 +466,7 @@ public final class AnalysisRegistry implements Closeable { } for (Map.Entry> entry : normalizerProviders.entrySet()) { processNormalizerFactory(deprecationLogger, indexSettings, entry.getKey(), entry.getValue(), normalizers, - tokenFilterFactoryFactories, charFilterFactoryFactories); + tokenizerFactoryFactories.get("keyword"), tokenFilterFactoryFactories, charFilterFactoryFactories); } for (Map.Entry entry : analyzerAliases.entrySet()) { String key = entry.getKey(); @@ -585,10 +585,11 @@ public final class AnalysisRegistry implements Closeable { String name, AnalyzerProvider normalizerFactory, Map normalizers, + TokenizerFactory keywordTokenizerFactory, Map tokenFilters, Map charFilters) { if (normalizerFactory instanceof CustomNormalizerProvider) { - ((CustomNormalizerProvider) normalizerFactory).build(charFilters, tokenFilters); + ((CustomNormalizerProvider) normalizerFactory).build(keywordTokenizerFactory, charFilters, tokenFilters); } Analyzer normalizerF = normalizerFactory.get(); if (normalizerF == null) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java index 3bf5d43375c..e9654719bdc 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.analysis; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.TextFieldMapper; @@ -34,13 +35,15 @@ import java.util.Map; public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final Settings analyzerSettings; + private final Environment environment; private CustomAnalyzer customAnalyzer; public CustomAnalyzerProvider(IndexSettings indexSettings, - String name, Settings settings) { + String name, Settings settings, Environment environment) { super(indexSettings, name, settings); this.analyzerSettings = settings; + this.environment = environment; } public void build(final Map tokenizers, final Map charFilters, @@ -65,6 +68,12 @@ public class CustomAnalyzerProvider 
extends AbstractIndexAnalyzerProvider tokenFilterList = new ArrayList<>(tokenFilterNames.length); for (String tokenFilterName : tokenFilterNames) { @@ -72,14 +81,12 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider tokenFilterList, + List charFiltersList, Environment env) { + if (tokenFilter instanceof SynonymGraphTokenFilterFactory) { + List tokenFiltersListForSynonym = new ArrayList<>(tokenFilterList); + + try (CustomAnalyzer analyzer = new CustomAnalyzer(tokenizerName, tokenizer, + charFiltersList.toArray(new CharFilterFactory[charFiltersList.size()]), + tokenFiltersListForSynonym.toArray(new TokenFilterFactory[tokenFiltersListForSynonym.size()]), + TextFieldMapper.Defaults.POSITION_INCREMENT_GAP, + -1)){ + tokenFilter = ((SynonymGraphTokenFilterFactory) tokenFilter).createPerAnalyzerSynonymGraphFactory(analyzer, env); + } + + } else if (tokenFilter instanceof SynonymTokenFilterFactory) { + List tokenFiltersListForSynonym = new ArrayList<>(tokenFilterList); + try (CustomAnalyzer analyzer = new CustomAnalyzer(tokenizerName, tokenizer, + charFiltersList.toArray(new CharFilterFactory[charFiltersList.size()]), + tokenFiltersListForSynonym.toArray(new TokenFilterFactory[tokenFiltersListForSynonym.size()]), + TextFieldMapper.Defaults.POSITION_INCREMENT_GAP, + -1)) { + tokenFilter = ((SynonymTokenFilterFactory) tokenFilter).createPerAnalyzerSynonymFactory(analyzer, env); + } + } + return tokenFilter; + } + @Override public CustomAnalyzer get() { return this.customAnalyzer; diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CustomNormalizerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/CustomNormalizerProvider.java index b88e62242cc..a375c1e8e3b 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/CustomNormalizerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/CustomNormalizerProvider.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.indices.analysis.PreBuiltTokenizers; import java.util.ArrayList; import java.util.List; @@ -44,7 +43,8 @@ public final class CustomNormalizerProvider extends AbstractIndexAnalyzerProvide this.analyzerSettings = settings; } - public void build(final Map charFilters, final Map tokenFilters) { + public void build(final TokenizerFactory keywordTokenizerFactory, final Map charFilters, + final Map tokenFilters) { String tokenizerName = analyzerSettings.get("tokenizer"); if (tokenizerName != null) { throw new IllegalArgumentException("Custom normalizer [" + name() + "] cannot configure a tokenizer"); @@ -83,7 +83,7 @@ public final class CustomNormalizerProvider extends AbstractIndexAnalyzerProvide this.customAnalyzer = new CustomAnalyzer( "keyword", - PreBuiltTokenizers.KEYWORD.getTokenizerFactory(indexSettings.getIndexVersionCreated()), + keywordTokenizerFactory, charFiltersList.toArray(new CharFilterFactory[charFiltersList.size()]), tokenFilterList.toArray(new TokenFilterFactory[tokenFilterList.size()]) ); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/SynonymGraphTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/SynonymGraphTokenFilterFactory.java index cfb37f0b075..2da3d8bc07a 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/SynonymGraphTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/SynonymGraphTokenFilterFactory.java @@ -19,13 +19,19 @@ 
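[Editor's note] With the CustomAnalyzerProvider change above, a synonym or synonym_graph filter is now specialized against the analysis chain that precedes it (its tokenizer, char filters, and earlier token filters) rather than a standalone whitespace tokenizer. A hypothetical index-analysis configuration exercising that, built with the Settings API; the filter/analyzer names are made up, and putArray is assumed to be the array-valued builder method of this codebase's Settings.Builder:

```java
import org.elasticsearch.common.settings.Settings;

public class SynonymAnalyzerSettingsExample {
    public static void main(String[] args) {
        Settings analysis = Settings.builder()
                .put("index.analysis.filter.my_synonyms.type", "synonym_graph")
                .putArray("index.analysis.filter.my_synonyms.synonyms", "laptop, notebook")
                .put("index.analysis.analyzer.my_analyzer.type", "custom")
                .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
                // "lowercase" precedes the synonym filter, so the synonym rules are parsed lowercased too
                .putArray("index.analysis.analyzer.my_analyzer.filter", "lowercase", "my_synonyms")
                .build();
        System.out.println(analysis.getAsMap());
    }
}
```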
package org.elasticsearch.index.analysis; +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.synonym.SolrSynonymParser; import org.apache.lucene.analysis.synonym.SynonymGraphFilter; +import org.apache.lucene.analysis.synonym.SynonymMap; +import org.apache.lucene.analysis.synonym.WordnetSynonymParser; +import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import java.io.IOException; +import java.io.Reader; public class SynonymGraphTokenFilterFactory extends SynonymTokenFilterFactory { public SynonymGraphTokenFilterFactory(IndexSettings indexSettings, Environment env, AnalysisRegistry analysisRegistry, @@ -35,7 +41,45 @@ public class SynonymGraphTokenFilterFactory extends SynonymTokenFilterFactory { @Override public TokenStream create(TokenStream tokenStream) { - // fst is null means no synonyms - return synonymMap.fst == null ? tokenStream : new SynonymGraphFilter(tokenStream, synonymMap, ignoreCase); + throw new IllegalStateException("Call createPerAnalyzerSynonymGraphFactory to specialize this factory for an analysis chain first"); + } + + Factory createPerAnalyzerSynonymGraphFactory(Analyzer analyzerForParseSynonym, Environment env){ + return new Factory("synonymgraph", analyzerForParseSynonym, getRulesFromSettings(env)); + } + + public class Factory implements TokenFilterFactory{ + + private final String name; + private final SynonymMap synonymMap; + + public Factory(String name, final Analyzer analyzerForParseSynonym, Reader rulesReader) { + this.name = name; + + try { + SynonymMap.Builder parser; + if ("wordnet".equalsIgnoreCase(format)) { + parser = new WordnetSynonymParser(true, expand, analyzerForParseSynonym); + ((WordnetSynonymParser) parser).parse(rulesReader); + } else { + parser = new SolrSynonymParser(true, expand, analyzerForParseSynonym); + ((SolrSynonymParser) parser).parse(rulesReader); + } + synonymMap = parser.build(); + } catch (Exception e) { + throw new IllegalArgumentException("failed to build synonyms", e); + } + } + + @Override + public String name() { + return this.name; + } + + @Override + public TokenStream create(TokenStream tokenStream) { + // fst is null means no synonyms + return synonymMap.fst == null ? 
tokenStream : new SynonymGraphFilter(tokenStream, synonymMap, ignoreCase); + } } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java index 0e23089827c..0815af44007 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java @@ -23,35 +23,80 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.synonym.SolrSynonymParser; import org.apache.lucene.analysis.synonym.SynonymFilter; import org.apache.lucene.analysis.synonym.SynonymMap; import org.apache.lucene.analysis.synonym.WordnetSynonymParser; +import org.elasticsearch.Version; import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.indices.analysis.AnalysisModule; +import java.io.BufferedReader; import java.io.IOException; import java.io.Reader; +import java.nio.file.Files; import java.util.List; public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { - protected final SynonymMap synonymMap; + /** + * @deprecated this property only works with tokenizer property + */ + @Deprecated protected final boolean ignoreCase; + protected final String format; + protected final boolean expand; + protected final Settings settings; public SynonymTokenFilterFactory(IndexSettings indexSettings, Environment env, AnalysisRegistry analysisRegistry, String name, Settings settings) throws IOException { super(indexSettings, name, settings); + this.settings = settings; - Reader rulesReader = null; + this.ignoreCase = + settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false, deprecationLogger); + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha3) && settings.get("ignore_case") != null) { + deprecationLogger.deprecated( + "This tokenize synonyms with whatever tokenizer and token filters appear before it in the chain. " + + "If you need ignore case with this filter, you should set lowercase filter before this"); + } + + this.expand = + settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "expand", true, deprecationLogger); + + // for backward compatibility + if (indexSettings.getIndexVersionCreated().before(Version.V_6_0_0_alpha3)) { + String tokenizerName = settings.get("tokenizer", "whitespace"); + AnalysisModule.AnalysisProvider tokenizerFactoryFactory = + analysisRegistry.getTokenizerProvider(tokenizerName, indexSettings); + if (tokenizerFactoryFactory == null) { + throw new IllegalArgumentException("failed to find tokenizer [" + tokenizerName + "] for synonym token filter"); + } + final TokenizerFactory tokenizerFactory = tokenizerFactoryFactory.get(indexSettings, env, tokenizerName, + AnalysisRegistry.getSettingsFromIndexSettings(indexSettings, + AnalysisRegistry.INDEX_ANALYSIS_TOKENIZER + "." 
+ tokenizerName)); + this.tokenizerFactory = tokenizerFactory; + } else { + this.tokenizerFactory = null; + } + + this.format = settings.get("format", ""); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + throw new IllegalStateException("Call createPerAnalyzerSynonymFactory to specialize this factory for an analysis chain first"); + } + + protected Reader getRulesFromSettings(Environment env) { + Reader rulesReader; if (settings.getAsArray("synonyms", null) != null) { - List rules = Analysis.getWordList(env, settings, "synonyms"); + List rulesList = Analysis.getWordList(env, settings, "synonyms"); StringBuilder sb = new StringBuilder(); - for (String line : rules) { + for (String line : rulesList) { sb.append(line).append(System.lineSeparator()); } rulesReader = new FastStringReader(sb.toString()); @@ -60,49 +105,72 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { } else { throw new IllegalArgumentException("synonym requires either `synonyms` or `synonyms_path` to be configured"); } + return rulesReader; + } - this.ignoreCase = - settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false, deprecationLogger); - boolean expand = - settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "expand", true, deprecationLogger); + Factory createPerAnalyzerSynonymFactory(Analyzer analyzerForParseSynonym, Environment env){ + return new Factory("synonym", analyzerForParseSynonym, getRulesFromSettings(env)); + } - String tokenizerName = settings.get("tokenizer", "whitespace"); - AnalysisModule.AnalysisProvider tokenizerFactoryFactory = - analysisRegistry.getTokenizerProvider(tokenizerName, indexSettings); - if (tokenizerFactoryFactory == null) { - throw new IllegalArgumentException("failed to find tokenizer [" + tokenizerName + "] for synonym token filter"); - } - final TokenizerFactory tokenizerFactory = tokenizerFactoryFactory.get(indexSettings, env, tokenizerName, - AnalysisRegistry.getSettingsFromIndexSettings(indexSettings, AnalysisRegistry.INDEX_ANALYSIS_TOKENIZER + "." + tokenizerName)); - Analyzer analyzer = new Analyzer() { - @Override - protected TokenStreamComponents createComponents(String fieldName) { - Tokenizer tokenizer = tokenizerFactory == null ? new WhitespaceTokenizer() : tokenizerFactory.create(); - TokenStream stream = ignoreCase ? new LowerCaseFilter(tokenizer) : tokenizer; - return new TokenStreamComponents(tokenizer, stream); - } - }; + // for backward compatibility + /** + * @deprecated This filter tokenize synonyms with whatever tokenizer and token filters appear before it in the chain in 6.0. + */ + @Deprecated + protected final TokenizerFactory tokenizerFactory; - try { - SynonymMap.Builder parser = null; + public class Factory implements TokenFilterFactory{ - if ("wordnet".equalsIgnoreCase(settings.get("format"))) { - parser = new WordnetSynonymParser(true, expand, analyzer); - ((WordnetSynonymParser) parser).parse(rulesReader); + private final String name; + private final SynonymMap synonymMap; + + public Factory(String name, Analyzer analyzerForParseSynonym, Reader rulesReader) { + + this.name = name; + + Analyzer analyzer; + if (tokenizerFactory != null) { + analyzer = new Analyzer() { + @Override + protected TokenStreamComponents createComponents(String fieldName) { + Tokenizer tokenizer = tokenizerFactory.create(); + TokenStream stream = ignoreCase ? 
new LowerCaseFilter(tokenizer) : tokenizer; + return new TokenStreamComponents(tokenizer, stream); + } + }; } else { - parser = new SolrSynonymParser(true, expand, analyzer); - ((SolrSynonymParser) parser).parse(rulesReader); + analyzer = analyzerForParseSynonym; } - synonymMap = parser.build(); - } catch (Exception e) { - throw new IllegalArgumentException("failed to build synonyms", e); + try { + SynonymMap.Builder parser; + if ("wordnet".equalsIgnoreCase(format)) { + parser = new WordnetSynonymParser(true, expand, analyzer); + ((WordnetSynonymParser) parser).parse(rulesReader); + } else { + parser = new SolrSynonymParser(true, expand, analyzer); + ((SolrSynonymParser) parser).parse(rulesReader); + } + synonymMap = parser.build(); + } catch (Exception e) { + throw new IllegalArgumentException("failed to build synonyms", e); + } finally { + if (tokenizerFactory != null) { + analyzer.close(); + } + } + } + + @Override + public String name() { + return this.name; + } + + @Override + public TokenStream create(TokenStream tokenStream) { + // fst is null means no synonyms + return synonymMap.fst == null ? tokenStream : new SynonymFilter(tokenStream, synonymMap, ignoreCase); } } - @Override - public TokenStream create(TokenStream tokenStream) { - // fst is null means no synonyms - return synonymMap.fst == null ? tokenStream : new SynonymFilter(tokenStream, synonymMap, ignoreCase); - } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 6e93d1feed5..d30f9629dc2 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -803,6 +803,12 @@ public abstract class Engine implements Closeable { */ public abstract CommitId flush() throws EngineException; + /** + * Rolls the translog generation and cleans unneeded. + */ + public abstract void rollTranslogGeneration() throws EngineException; + + /** * Force merges to 1 segment */ diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index f84f76b537e..a8f0759c1bb 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -150,7 +150,10 @@ public class InternalEngine extends Engine { } this.uidField = engineConfig.getIndexSettings().isSingleType() ? 
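[Editor's note] Under the hood, both per-analyzer factories above build a SynonymMap by running the rules through a Solr- or WordNet-format parser that analyzes each rule with the supplied analyzer, and the resulting filter is a no-op when no synonyms were produced (synonymMap.fst == null). A minimal standalone sketch using plain Lucene, assuming the analyzers-common module is on the classpath:

```java
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.synonym.SolrSynonymParser;
import org.apache.lucene.analysis.synonym.SynonymGraphFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;

import java.io.StringReader;

public class SynonymMapExample {
    /** Parses Solr-format rules with the given analyzer, mirroring the per-analyzer factories above. */
    static SynonymMap buildSynonyms(Analyzer analyzer, String rules) throws Exception {
        SolrSynonymParser parser = new SolrSynonymParser(true, true, analyzer); // dedup = true, expand = true
        parser.parse(new StringReader(rules));
        return parser.build();
    }

    /** Wraps the stream only when the rules actually produced synonyms (fst == null means none). */
    static TokenStream maybeAddSynonyms(TokenStream in, SynonymMap synonyms) {
        return synonyms.fst == null ? in : new SynonymGraphFilter(in, synonyms, false);
    }

    public static void main(String[] args) throws Exception {
        SynonymMap map = buildSynonyms(new WhitespaceAnalyzer(), "laptop, notebook");
        System.out.println("synonyms parsed: " + (map.fst != null));
    }
}
```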
IdFieldMapper.NAME : UidFieldMapper.NAME; this.versionMap = new LiveVersionMap(); - final TranslogDeletionPolicy translogDeletionPolicy = new TranslogDeletionPolicy(); + final TranslogDeletionPolicy translogDeletionPolicy = new TranslogDeletionPolicy( + engineConfig.getIndexSettings().getTranslogRetentionSize().getBytes(), + engineConfig.getIndexSettings().getTranslogRetentionAge().getMillis() + ); this.deletionPolicy = new CombinedDeletionPolicy( new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()), translogDeletionPolicy, openMode); store.incRef(); @@ -1212,7 +1215,7 @@ public class InternalEngine extends Engine { ensureOpen(); ensureCanFlush(); String syncId = lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID); - if (syncId != null && translog.totalOperations() == 0 && indexWriter.hasUncommittedChanges()) { + if (syncId != null && translog.uncommittedOperations() == 0 && indexWriter.hasUncommittedChanges()) { logger.trace("start renewing sync commit [{}]", syncId); commitIndexWriter(indexWriter, translog, syncId); logger.debug("successfully sync committed. sync id [{}].", syncId); @@ -1314,6 +1317,25 @@ public class InternalEngine extends Engine { return new CommitId(newCommitId); } + @Override + public void rollTranslogGeneration() throws EngineException { + try (ReleasableLock ignored = readLock.acquire()) { + ensureOpen(); + translog.rollGeneration(); + translog.trimUnreferencedReaders(); + } catch (AlreadyClosedException e) { + failOnTragicEvent(e); + throw e; + } catch (Exception e) { + try { + failEngine("translog trimming failed", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw new EngineException(shardId, "failed to roll translog", e); + } + } + private void pruneDeletedTombstones() { long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis(); @@ -1854,6 +1876,10 @@ public class InternalEngine extends Engine { // the setting will be re-interpreted if it's set to true this.maxUnsafeAutoIdTimestamp.set(Long.MAX_VALUE); } + final TranslogDeletionPolicy translogDeletionPolicy = translog.getDeletionPolicy(); + final IndexSettings indexSettings = engineConfig.getIndexSettings(); + translogDeletionPolicy.setRetentionAgeInMillis(indexSettings.getTranslogRetentionAge().getMillis()); + translogDeletionPolicy.setRetentionSizeInBytes(indexSettings.getTranslogRetentionSize().getBytes()); } public MergeStats getMergeStats() { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java index eab98040bbb..fa126d68132 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java @@ -180,8 +180,7 @@ public class PagedBytesIndexFieldData extends AbstractIndexOrdinalsFieldData { LeafReader reader = context.reader(); Terms terms = reader.terms(getFieldName()); - Fields fields = reader.fields(); - final Terms fieldTerms = fields.terms(getFieldName()); + final Terms fieldTerms = reader.terms(getFieldName()); if (fieldTerms instanceof FieldReader) { final Stats stats = ((FieldReader) fieldTerms).getStats(); diff --git a/core/src/main/java/org/elasticsearch/index/get/GetResult.java b/core/src/main/java/org/elasticsearch/index/get/GetResult.java index 3837a39c5c5..a47bb8be89e 100644 --- a/core/src/main/java/org/elasticsearch/index/get/GetResult.java +++ 
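[Editor's note] InternalEngine above now builds its TranslogDeletionPolicy from the retention size/age settings, re-reads them in onSettingsChanged, and exposes rollTranslogGeneration to roll the generation and trim readers the policy no longer needs. The retention decision boils down to something like the following sketch; it is entirely illustrative, and the real policy also accounts for generations referenced by open views and the last commit:

```java
final class TranslogRetentionSketch {
    /**
     * Whether a translog generation should be kept. Generations still needed for recovery are
     * always kept; otherwise both the age budget (index.translog.retention.age) and the size
     * budget (index.translog.retention.size) must still have headroom.
     */
    static boolean keepGeneration(boolean requiredForRecovery,
                                  long generationAgeMillis, long retentionAgeMillis,
                                  long retainedBytesSoFar, long retentionSizeBytes) {
        if (requiredForRecovery) {
            return true;
        }
        return generationAgeMillis <= retentionAgeMillis && retainedBytesSoFar <= retentionSizeBytes;
    }
}
```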
b/core/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.get; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -43,14 +44,12 @@ import java.util.Objects; import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField; -import static org.elasticsearch.index.get.GetField.readGetField; -public class GetResult implements Streamable, Iterable, ToXContentObject { +public class GetResult implements Streamable, Iterable, ToXContentObject { - private static final String _INDEX = "_index"; - private static final String _TYPE = "_type"; - private static final String _ID = "_id"; + public static final String _INDEX = "_index"; + public static final String _TYPE = "_type"; + public static final String _ID = "_id"; private static final String _VERSION = "_version"; private static final String FOUND = "found"; private static final String FIELDS = "fields"; @@ -60,7 +59,7 @@ public class GetResult implements Streamable, Iterable, ToXContentObje private String id; private long version; private boolean exists; - private Map fields; + private Map fields; private Map sourceAsMap; private BytesReference source; private byte[] sourceAsBytes; @@ -69,7 +68,7 @@ public class GetResult implements Streamable, Iterable, ToXContentObje } public GetResult(String index, String type, String id, long version, boolean exists, BytesReference source, - Map fields) { + Map fields) { this.index = index; this.type = type; this.id = id; @@ -196,16 +195,16 @@ public class GetResult implements Streamable, Iterable, ToXContentObje return sourceAsMap(); } - public Map getFields() { + public Map getFields() { return fields; } - public GetField field(String name) { + public DocumentField field(String name) { return fields.get(name); } @Override - public Iterator iterator() { + public Iterator iterator() { if (fields == null) { return Collections.emptyIterator(); } @@ -213,10 +212,10 @@ public class GetResult implements Streamable, Iterable, ToXContentObje } public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params) throws IOException { - List metaFields = new ArrayList<>(); - List otherFields = new ArrayList<>(); + List metaFields = new ArrayList<>(); + List otherFields = new ArrayList<>(); if (fields != null && !fields.isEmpty()) { - for (GetField field : fields.values()) { + for (DocumentField field : fields.values()) { if (field.getValues().isEmpty()) { continue; } @@ -228,8 +227,9 @@ public class GetResult implements Streamable, Iterable, ToXContentObje } } - for (GetField field : metaFields) { - builder.field(field.getName(), field.getValue()); + for (DocumentField field : metaFields) { + Object value = field.getValue(); + builder.field(field.getName(), value); } builder.field(FOUND, exists); @@ -240,7 +240,7 @@ public class GetResult implements Streamable, Iterable, ToXContentObje if (!otherFields.isEmpty()) { builder.startObject(FIELDS); - for (GetField field : otherFields) { + for (DocumentField field : otherFields) { field.toXContent(builder, params); } 
builder.endObject(); @@ -273,9 +273,9 @@ public class GetResult implements Streamable, Iterable, ToXContentObje String currentFieldName = parser.currentName(); String index = null, type = null, id = null; long version = -1; - boolean found = false; + Boolean found = null; BytesReference source = null; - Map fields = new HashMap<>(); + Map fields = new HashMap<>(); while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -291,7 +291,7 @@ public class GetResult implements Streamable, Iterable, ToXContentObje } else if (FOUND.equals(currentFieldName)) { found = parser.booleanValue(); } else { - fields.put(currentFieldName, new GetField(currentFieldName, Collections.singletonList(parser.objectText()))); + fields.put(currentFieldName, new DocumentField(currentFieldName, Collections.singletonList(parser.objectText()))); } } else if (token == XContentParser.Token.START_OBJECT) { if (SourceFieldMapper.NAME.equals(currentFieldName)) { @@ -303,12 +303,14 @@ public class GetResult implements Streamable, Iterable, ToXContentObje } } else if (FIELDS.equals(currentFieldName)) { while(parser.nextToken() != XContentParser.Token.END_OBJECT) { - GetField getField = GetField.fromXContent(parser); + DocumentField getField = DocumentField.fromXContent(parser); fields.put(getField.getName(), getField); } } else { - throwUnknownField(currentFieldName, parser.getTokenLocation()); + parser.skipChildren(); // skip potential inner objects for forward compatibility } + } else if (token == XContentParser.Token.START_ARRAY) { + parser.skipChildren(); // skip potential inner arrays for forward compatibility } } return new GetResult(index, type, id, version, found, source, fields); @@ -345,7 +347,7 @@ public class GetResult implements Streamable, Iterable, ToXContentObje } else { fields = new HashMap<>(size); for (int i = 0; i < size; i++) { - GetField field = readGetField(in); + DocumentField field = DocumentField.readDocumentField(in); fields.put(field.getName(), field); } } @@ -365,7 +367,7 @@ public class GetResult implements Streamable, Iterable, ToXContentObje out.writeVInt(0); } else { out.writeVInt(fields.size()); - for (GetField field : fields.values()) { + for (DocumentField field : fields.values()) { field.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 15e6e234284..0aeb4f3f19d 100644 --- a/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -24,6 +24,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; @@ -173,7 +174,7 @@ public final class ShardGetService extends AbstractIndexShardComponent { } private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService) { - Map fields = null; + Map fields = null; BytesReference source = null; DocIdAndVersion docIdAndVersion = get.docIdAndVersion(); 
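[Editor's note] GetResult.fromXContent above (like parseTypedKeysObject earlier in this change) now skips unknown objects and arrays instead of throwing, so responses from newer nodes that add fields can still be parsed. The idiom, sketched against the XContentParser API used in this codebase:

```java
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

final class ForwardCompatParsingSketch {
    /** Skips over a value the parser does not recognize instead of failing on it. */
    static void skipUnknownValue(XContentParser parser, XContentParser.Token token) throws IOException {
        if (token == XContentParser.Token.START_OBJECT || token == XContentParser.Token.START_ARRAY) {
            parser.skipChildren(); // consume the whole nested object or array
        }
        // Scalar values need no special handling; the caller simply ignores them.
    }
}
```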
FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext); @@ -189,7 +190,7 @@ public final class ShardGetService extends AbstractIndexShardComponent { fieldVisitor.postProcess(mapperService); fields = new HashMap<>(fieldVisitor.fields().size()); for (Map.Entry> entry : fieldVisitor.fields().entrySet()) { - fields.put(entry.getKey(), new GetField(entry.getKey(), entry.getValue())); + fields.put(entry.getKey(), new DocumentField(entry.getKey(), entry.getValue())); } } } @@ -200,7 +201,7 @@ public final class ShardGetService extends AbstractIndexShardComponent { if (fields == null) { fields = new HashMap<>(1); } - fields.put(ParentFieldMapper.NAME, new GetField(ParentFieldMapper.NAME, Collections.singletonList(parentId))); + fields.put(ParentFieldMapper.NAME, new DocumentField(ParentFieldMapper.NAME, Collections.singletonList(parentId))); } if (gFields != null && gFields.length > 0) { diff --git a/core/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/core/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java index 2f6775a1eae..5124201997e 100644 --- a/core/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java @@ -34,6 +34,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Client; import org.elasticsearch.client.ParentTaskAssigningClient; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -42,7 +43,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHitField; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -254,7 +254,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource { } private T fieldValue(String fieldName) { - SearchHitField field = delegate.field(fieldName); + DocumentField field = delegate.field(fieldName); return field == null ? null : field.getValue(); } } diff --git a/core/src/main/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTask.java b/core/src/main/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTask.java index acb1c0e5547..4e11b3c9595 100644 --- a/core/src/main/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTask.java +++ b/core/src/main/java/org/elasticsearch/index/reindex/WorkingBulkByScrollTask.java @@ -90,8 +90,9 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success @Override protected void onCancelled() { - // Drop the throttle to 0, immediately rescheduling all outstanding tasks so the task will wake up and cancel itself. - rethrottle(0); + /* Drop the throttle to 0, immediately rescheduling any throttled + * operation so it will wake up and cancel itself. */ + rethrottle(Float.POSITIVE_INFINITY); } @Override @@ -179,6 +180,7 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success // Synchronize so we are less likely to schedule the same request twice. 
synchronized (delayedPrepareBulkRequestReference) { TimeValue delay = throttleWaitTime(lastBatchStartTime, timeValueNanos(System.nanoTime()), lastBatchSize); + logger.debug("[{}]: preparing bulk request for [{}]", getId(), delay); delayedPrepareBulkRequestReference.set(new DelayedPrepareBulkRequest(threadPool, getRequestsPerSecond(), delay, new RunOnce(prepareBulkRequestRunnable))); } @@ -205,6 +207,9 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success } private void setRequestsPerSecond(float requestsPerSecond) { + if (requestsPerSecond <= 0) { + throw new IllegalArgumentException("requests per second must be more than 0 but was [" + requestsPerSecond + "]"); + } this.requestsPerSecond = requestsPerSecond; } @@ -216,8 +221,8 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success DelayedPrepareBulkRequest delayedPrepareBulkRequest = this.delayedPrepareBulkRequestReference.get(); if (delayedPrepareBulkRequest == null) { + // No request has been queued so nothing to reschedule. logger.debug("[{}]: skipping rescheduling because there is no scheduled task", getId()); - // No request has been queued yet so nothing to reschedule. return; } @@ -250,11 +255,11 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success } DelayedPrepareBulkRequest rethrottle(float newRequestsPerSecond) { - if (newRequestsPerSecond != 0 && newRequestsPerSecond < requestsPerSecond) { - /* - * The user is attempting to slow the request down. We'll let the change in throttle take effect the next time we delay - * prepareBulkRequest. We can't just reschedule the request further out in the future the bulk context might time out. - */ + if (newRequestsPerSecond < requestsPerSecond) { + /* The user is attempting to slow the request down. We'll let the + * change in throttle take effect the next time we delay + * prepareBulkRequest. We can't just reschedule the request further + * out in the future because the bulk context might time out. */ logger.debug("[{}]: skipping rescheduling because the new throttle [{}] is slower than the old one [{}]", getId(), newRequestsPerSecond, requestsPerSecond); return this; @@ -268,10 +273,10 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success return this; } - /* - * Strangely enough getting here doesn't mean that you actually cancelled the request, just that you probably did. If you stress - * test it you'll find that requests sneak through. So each request is given a runOnce boolean to prevent that. - */ + /* Strangely enough getting here doesn't mean that you actually + * cancelled the request, just that you probably did. If you stress + * test it you'll find that requests sneak through. So each request + * is given a runOnce boolean to prevent that. */ TimeValue newDelay = newDelay(remainingDelay, newRequestsPerSecond); logger.debug("[{}]: rescheduling for [{}] in the future", getId(), newDelay); return new DelayedPrepareBulkRequest(threadPool, requestsPerSecond, newDelay, command); @@ -281,7 +286,7 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success * Scale back remaining delay to fit the new delay. 
*/ TimeValue newDelay(long remainingDelay, float newRequestsPerSecond) { - if (remainingDelay < 0 || newRequestsPerSecond == 0) { + if (remainingDelay < 0) { return timeValueNanos(0); } return timeValueNanos(round(remainingDelay * requestsPerSecond / newRequestsPerSecond)); diff --git a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java index ea6edef7a12..a669065d32b 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointTracker.java @@ -25,11 +25,17 @@ import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.PrimaryContext; import org.elasticsearch.index.shard.ShardId; +import java.util.Arrays; +import java.util.Comparator; import java.util.HashSet; +import java.util.List; import java.util.Locale; import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; /** * This class is responsible of tracking the global checkpoint. The global checkpoint is the highest sequence number for which all lower (or @@ -42,6 +48,8 @@ import java.util.Set; */ public class GlobalCheckpointTracker extends AbstractIndexShardComponent { + long appliedClusterStateVersion; + /* * This map holds the last known local checkpoint for every active shard and initializing shard copies that has been brought up to speed * through recovery. These shards are treated as valid copies and participate in determining the global checkpoint. This map is keyed by @@ -68,6 +76,12 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent { */ private long globalCheckpoint; + /* + * During relocation handoff, the state of the global checkpoint tracker is sampled. After sampling, there should be no additional + * mutations to this tracker until the handoff has completed. + */ + private boolean sealed = false; + /** * Initialize the global checkpoint service. The specified global checkpoint should be set to the last known global checkpoint, or * {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}. @@ -94,6 +108,9 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent { * @param localCheckpoint the local checkpoint for the shard */ public synchronized void updateLocalCheckpoint(final String allocationId, final long localCheckpoint) { + if (sealed) { + throw new IllegalStateException("global checkpoint tracker is sealed"); + } final boolean updated; if (updateLocalCheckpoint(allocationId, localCheckpoint, inSyncLocalCheckpoints, "in-sync")) { updated = true; @@ -210,11 +227,18 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent { /** * Notifies the service of the current allocation ids in the cluster state. This method trims any shards that have been removed. 
* - * @param activeAllocationIds the allocation IDs of the currently active shard copies - * @param initializingAllocationIds the allocation IDs of the currently initializing shard copies + * @param applyingClusterStateVersion the cluster state version being applied when updating the allocation IDs from the master + * @param activeAllocationIds the allocation IDs of the currently active shard copies + * @param initializingAllocationIds the allocation IDs of the currently initializing shard copies */ public synchronized void updateAllocationIdsFromMaster( - final Set activeAllocationIds, final Set initializingAllocationIds) { + final long applyingClusterStateVersion, final Set activeAllocationIds, final Set initializingAllocationIds) { + if (applyingClusterStateVersion < appliedClusterStateVersion) { + return; + } + + appliedClusterStateVersion = applyingClusterStateVersion; + // remove shards whose allocation ID no longer exists inSyncLocalCheckpoints.removeAll(a -> !activeAllocationIds.contains(a) && !initializingAllocationIds.contains(a)); @@ -248,6 +272,156 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent { updateGlobalCheckpointOnPrimary(); } + /** + * Get the primary context for the shard. This includes the state of the global checkpoint tracker. + * + * @return the primary context + */ + synchronized PrimaryContext primaryContext() { + if (sealed) { + throw new IllegalStateException("global checkpoint tracker is sealed"); + } + sealed = true; + final ObjectLongMap inSyncLocalCheckpoints = new ObjectLongHashMap<>(this.inSyncLocalCheckpoints); + final ObjectLongMap trackingLocalCheckpoints = new ObjectLongHashMap<>(this.trackingLocalCheckpoints); + return new PrimaryContext(appliedClusterStateVersion, inSyncLocalCheckpoints, trackingLocalCheckpoints); + } + + /** + * Releases a previously acquired primary context. + */ + synchronized void releasePrimaryContext() { + assert sealed; + sealed = false; + } + + /** + * Updates the known allocation IDs and the local checkpoints for the corresponding allocations from a primary relocation source. + * + * @param primaryContext the primary context + */ + synchronized void updateAllocationIdsFromPrimaryContext(final PrimaryContext primaryContext) { + if (sealed) { + throw new IllegalStateException("global checkpoint tracker is sealed"); + } + /* + * We are gathered here today to witness the relocation handoff transferring knowledge from the relocation source to the relocation + * target. We need to consider the possibility that the version of the cluster state on the relocation source when the primary + * context was sampled is different than the version of the cluster state on the relocation target at this exact moment. We define + * the following values: + * - version(source) = the cluster state version on the relocation source used to ensure a minimum cluster state version on the + * relocation target + * - version(context) = the cluster state version on the relocation source when the primary context was sampled + * - version(target) = the current cluster state version on the relocation target + * + * We know that version(source) <= version(target) and version(context) < version(target), version(context) = version(target), and + * version(target) < version(context) are all possibilities. 
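The guard at the top of updateAllocationIdsFromMaster is only about ordering: an update carrying an older cluster state version than the one already applied is dropped. A stripped-down sketch of just that bookkeeping follows; the class is illustrative and omits the checkpoint maps the real tracker maintains.

import java.util.Collections;
import java.util.Set;

final class AllocationIdVersionGuardSketch {

    private long appliedClusterStateVersion;

    synchronized boolean updateFromMaster(long applyingClusterStateVersion,
                                          Set<String> activeAllocationIds,
                                          Set<String> initializingAllocationIds) {
        if (applyingClusterStateVersion < appliedClusterStateVersion) {
            // This update carries an older cluster state version than the one already applied, so drop it.
            return false;
        }
        appliedClusterStateVersion = applyingClusterStateVersion;
        // ... the real tracker now trims and extends its checkpoint maps from the two allocation ID sets ...
        return true;
    }

    public static void main(String[] args) {
        AllocationIdVersionGuardSketch guard = new AllocationIdVersionGuardSketch();
        System.out.println(guard.updateFromMaster(7, Collections.singleton("alloc-1"), Collections.singleton("alloc-2"))); // true
        System.out.println(guard.updateFromMaster(5, Collections.singleton("alloc-1"), Collections.<String>emptySet()));   // false, stale
    }
}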
+ * + * The case of version(context) = version(target) causes no issues as in this case the knowledge of the in-sync and initializing + * shards the target receives from the master will be equal to the knowledge of the in-sync and initializing shards the target + * receives from the relocation source via the primary context. + * + * Let us now consider the case that version(context) < version(target). In this case, the active allocation IDs in the primary + * context can be a superset of the active allocation IDs contained in the applied cluster state. This is because no new shards can + * have been started as marking a shard as in-sync is blocked during relocation handoff. Note however that the relocation target + * itself will have been marked in-sync during recovery and therefore is an active allocation ID from the perspective of the primary + * context. + * + * Finally, we consider the case that version(target) < version(context). In this case, the active allocation IDs in the primary + * context can be a subset of the active allocation IDs contained in the applied cluster state. This is again because no new shards can + * have been started. Moreover, existing active allocation IDs could have been removed from the cluster state. + * + * In each of these latter two cases, consider initializing shards that are contained in the primary context but not contained in + * the cluster state applied on the target. + * + * If version(context) < version(target) it means that the shard has been removed by a later cluster state update that is already + * applied on the target and we only need to ensure that we do not add it to the tracking map on the target. The call to + * GlobalCheckpointTracker#updateLocalCheckpoint(String, long) is a no-op for such shards and this is safe. + * + * If version(target) < version(context) it means that the shard has started initializing by a later cluster state update that has not + * yet arrived on the target. However, there is a delay on recoveries before we ensure that version(source) <= version(target). + * Therefore, such a shard can never initialize from the relocation source and will have to await the handoff completing. As such, + * these shards are not problematic. + * + * Lastly, again in these two cases, what about initializing shards that are contained in the cluster state applied on the target but + * not contained in the primary context? + * + * If version(context) < version(target) it means that a shard has started initializing in a later cluster state that is applied on + * the target but not yet known to the relocation source. As recoveries are delayed at this time, these shards cannot + * cause a problem and we do not remove these shards from the tracking map, so we are safe here. + * + * If version(target) < version(context) it means that a shard has started initializing but was removed by a later cluster state. In + * this case, as the cluster state version on the primary context exceeds the applied cluster state version, we replace the tracking + * map and are safe here too. 
+ */ + + assert StreamSupport + .stream(inSyncLocalCheckpoints.spliterator(), false) + .allMatch(e -> e.value == SequenceNumbersService.UNASSIGNED_SEQ_NO) : inSyncLocalCheckpoints; + assert StreamSupport + .stream(trackingLocalCheckpoints.spliterator(), false) + .allMatch(e -> e.value == SequenceNumbersService.UNASSIGNED_SEQ_NO) : trackingLocalCheckpoints; + assert pendingInSync.isEmpty() : pendingInSync; + + if (primaryContext.clusterStateVersion() > appliedClusterStateVersion) { + final Set activeAllocationIds = + new HashSet<>(Arrays.asList(primaryContext.inSyncLocalCheckpoints().keys().toArray(String.class))); + final Set initializingAllocationIds = + new HashSet<>(Arrays.asList(primaryContext.trackingLocalCheckpoints().keys().toArray(String.class))); + updateAllocationIdsFromMaster(primaryContext.clusterStateVersion(), activeAllocationIds, initializingAllocationIds); + } + + /* + * As we are updating the local checkpoints for the in-sync allocation IDs, the global checkpoint will advance in place; this means + * that we have to sort the incoming local checkpoints from smallest to largest lest we violate that the global checkpoint does not + * regress. + */ + + class AllocationIdLocalCheckpointPair { + + private final String allocationId; + + public String allocationId() { + return allocationId; + } + + private final long localCheckpoint; + + public long localCheckpoint() { + return localCheckpoint; + } + + private AllocationIdLocalCheckpointPair(final String allocationId, final long localCheckpoint) { + this.allocationId = allocationId; + this.localCheckpoint = localCheckpoint; + } + + } + + final List inSync = + StreamSupport + .stream(primaryContext.inSyncLocalCheckpoints().spliterator(), false) + .map(e -> new AllocationIdLocalCheckpointPair(e.key, e.value)) + .collect(Collectors.toList()); + inSync.sort(Comparator.comparingLong(AllocationIdLocalCheckpointPair::localCheckpoint)); + + for (final AllocationIdLocalCheckpointPair cursor : inSync) { + assert cursor.localCheckpoint() >= globalCheckpoint + : "local checkpoint [" + cursor.localCheckpoint() + "] " + + "for allocation ID [" + cursor.allocationId() + "] " + + "violates being at least the global checkpoint [" + globalCheckpoint + "]"; + updateLocalCheckpoint(cursor.allocationId(), cursor.localCheckpoint()); + if (trackingLocalCheckpoints.containsKey(cursor.allocationId())) { + moveAllocationIdFromTrackingToInSync(cursor.allocationId(), "relocation"); + updateGlobalCheckpointOnPrimary(); + } + } + + for (final ObjectLongCursor cursor : primaryContext.trackingLocalCheckpoints()) { + updateLocalCheckpoint(cursor.key, cursor.value); + } + } + /** * Marks the shard with the provided allocation ID as in-sync with the primary shard. This method will block until the local checkpoint * on the specified shard advances above the current global checkpoint. 
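The sort above is the whole trick for keeping the global checkpoint monotonic during the handoff: entries are applied from the smallest local checkpoint to the largest, so a minimum taken over the in-sync copies can only move forward. A self-contained sketch of the ordering step, with made-up allocation IDs and checkpoint values:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

final class CheckpointOrderingSketch {

    static final class AllocationCheckpoint {
        final String allocationId;
        final long localCheckpoint;

        AllocationCheckpoint(String allocationId, long localCheckpoint) {
            this.allocationId = allocationId;
            this.localCheckpoint = localCheckpoint;
        }
    }

    public static void main(String[] args) {
        List<AllocationCheckpoint> incoming = new ArrayList<>();
        incoming.add(new AllocationCheckpoint("alloc-a", 17));
        incoming.add(new AllocationCheckpoint("alloc-b", 9));
        incoming.add(new AllocationCheckpoint("alloc-c", 12));

        // Apply from smallest to largest so each update is at least as large as the previous one.
        incoming.sort(Comparator.comparingLong((AllocationCheckpoint pair) -> pair.localCheckpoint));

        long previous = Long.MIN_VALUE;
        for (AllocationCheckpoint pair : incoming) {
            assert pair.localCheckpoint >= previous;
            previous = pair.localCheckpoint;
            System.out.println(pair.allocationId + " -> " + pair.localCheckpoint);
        }
    }
}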
@@ -258,6 +432,9 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent { * @throws InterruptedException if the thread is interrupted waiting for the local checkpoint on the shard to advance */ public synchronized void markAllocationIdAsInSync(final String allocationId, final long localCheckpoint) throws InterruptedException { + if (sealed) { + throw new IllegalStateException("global checkpoint tracker is sealed"); + } if (!trackingLocalCheckpoints.containsKey(allocationId)) { /* * This can happen if the recovery target has been failed and the cluster state update from the master has triggered removing @@ -295,15 +472,13 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent { */ final long current = trackingLocalCheckpoints.getOrDefault(allocationId, Long.MIN_VALUE); if (current >= globalCheckpoint) { - logger.trace("marked [{}] as in-sync with local checkpoint [{}]", allocationId, current); - trackingLocalCheckpoints.remove(allocationId); /* * This is prematurely adding the allocation ID to the in-sync map as at this point recovery is not yet finished and could * still abort. At this point we will end up with a shard in the in-sync map holding back the global checkpoint because the * shard never recovered and we would have to wait until either the recovery retries and completes successfully, or the * master fails the shard and issues a cluster state update that removes the shard from the set of active allocation IDs. */ - inSyncLocalCheckpoints.put(allocationId, current); + moveAllocationIdFromTrackingToInSync(allocationId, "recovery"); break; } else { waitForLocalCheckpointToAdvance(); @@ -311,6 +486,21 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent { } } + /** + * Moves a tracking allocation ID to be in-sync. This can occur when a shard is recovering from the primary and its local checkpoint has + * advanced past the global checkpoint, or during relocation hand-off when the relocation target learns of an in-sync shard from the + * relocation source. + * + * @param allocationId the allocation ID to move + * @param reason the reason for the transition + */ + private synchronized void moveAllocationIdFromTrackingToInSync(final String allocationId, final String reason) { + assert trackingLocalCheckpoints.containsKey(allocationId); + final long current = trackingLocalCheckpoints.remove(allocationId); + inSyncLocalCheckpoints.put(allocationId, current); + logger.trace("marked [{}] as in-sync with local checkpoint [{}] due to [{}]", allocationId, current, reason); + } + /** * Wait for the local checkpoint to advance to the global checkpoint. * @@ -324,12 +514,21 @@ public class GlobalCheckpointTracker extends AbstractIndexShardComponent { /** * Check if there are any recoveries pending in-sync. * - * @return {@code true} if there is at least one shard pending in-sync, otherwise false + * @return true if there is at least one shard pending in-sync, otherwise false */ - public boolean pendingInSync() { + boolean pendingInSync() { return !pendingInSync.isEmpty(); } + /** + * Check if the tracker is sealed. + * + * @return true if the tracker is sealed, otherwise false. + */ + boolean sealed() { + return sealed; + } + /** * Returns the local checkpoint for the shard with the specified allocation ID, or {@link SequenceNumbersService#UNASSIGNED_SEQ_NO} if * the shard is not in-sync. 
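Taken together, the sealed flag, primaryContext(), and releasePrimaryContext() form a small state machine: sampling the context seals the tracker, releasing unseals it, and every mutating method refuses to run while sealed. A compact sketch of that lifecycle; the names mirror the patch but the class below is purely illustrative.

final class SealableTrackerSketch {

    private boolean sealed = false;

    synchronized void updateLocalCheckpoint(String allocationId, long localCheckpoint) {
        ensureNotSealed();
        // ... mutate tracker state for the given allocation ID ...
    }

    synchronized String samplePrimaryContext() {
        ensureNotSealed();
        sealed = true;
        return "snapshot-of-tracker-state"; // stand-in for the real PrimaryContext
    }

    synchronized void releasePrimaryContext() {
        assert sealed;
        sealed = false;
    }

    synchronized boolean sealed() {
        return sealed;
    }

    private void ensureNotSealed() {
        if (sealed) {
            throw new IllegalStateException("global checkpoint tracker is sealed");
        }
    }

    public static void main(String[] args) {
        SealableTrackerSketch tracker = new SealableTrackerSketch();
        tracker.samplePrimaryContext();
        System.out.println("sealed: " + tracker.sealed());
        tracker.releasePrimaryContext();
        tracker.updateLocalCheckpoint("alloc-1", 42); // permitted again after release
    }
}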
diff --git a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java index 4180c7e0f7d..6d8b87599a1 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/SequenceNumbersService.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.seqno; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; +import org.elasticsearch.index.shard.PrimaryContext; import org.elasticsearch.index.shard.ShardId; import java.util.Set; @@ -165,13 +166,24 @@ public class SequenceNumbersService extends AbstractIndexShardComponent { /** * Notifies the service of the current allocation IDs in the cluster state. See - * {@link GlobalCheckpointTracker#updateAllocationIdsFromMaster(Set, Set)} for details. + * {@link GlobalCheckpointTracker#updateAllocationIdsFromMaster(long, Set, Set)} for details. * - * @param activeAllocationIds the allocation IDs of the currently active shard copies - * @param initializingAllocationIds the allocation IDs of the currently initializing shard copies + * @param applyingClusterStateVersion the cluster state version being applied when updating the allocation IDs from the master + * @param activeAllocationIds the allocation IDs of the currently active shard copies + * @param initializingAllocationIds the allocation IDs of the currently initializing shard copies */ - public void updateAllocationIdsFromMaster(final Set activeAllocationIds, final Set initializingAllocationIds) { - globalCheckpointTracker.updateAllocationIdsFromMaster(activeAllocationIds, initializingAllocationIds); + public void updateAllocationIdsFromMaster( + final long applyingClusterStateVersion, final Set activeAllocationIds, final Set initializingAllocationIds) { + globalCheckpointTracker.updateAllocationIdsFromMaster(applyingClusterStateVersion, activeAllocationIds, initializingAllocationIds); + } + + /** + * Updates the known allocation IDs and the local checkpoints for the corresponding allocations from a primary relocation source. + * + * @param primaryContext the sequence number context + */ + public void updateAllocationIdsFromPrimaryContext(final PrimaryContext primaryContext) { + globalCheckpointTracker.updateAllocationIdsFromPrimaryContext(primaryContext); } /** @@ -183,4 +195,20 @@ public class SequenceNumbersService extends AbstractIndexShardComponent { return globalCheckpointTracker.pendingInSync(); } + /** + * Get the primary context for the shard. This includes the state of the global checkpoint tracker. + * + * @return the primary context + */ + public PrimaryContext primaryContext() { + return globalCheckpointTracker.primaryContext(); + } + + /** + * Releases a previously acquired primary context. 
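The pairing implied by these two new service methods is that a caller which samples the primary context must release it again if the handoff does not go through, which is how the relocation path later in this patch uses them. A hedged sketch of that acquire, hand off, release-on-failure shape; the Tracker interface and the simulated network call are placeholders, not the real transport API.

import java.util.function.Consumer;

final class PrimaryContextHandoffSketch {

    interface Tracker {
        String primaryContext();      // seals the tracker and returns a snapshot (stand-in type)
        void releasePrimaryContext(); // unseals the tracker after a failed hand-off
    }

    static void handOff(Tracker tracker, Consumer<String> sendToRelocationTarget) {
        final String context = tracker.primaryContext();
        try {
            sendToRelocationTarget.accept(context); // hypothetical network hop to the relocation target
        } catch (RuntimeException e) {
            try {
                tracker.releasePrimaryContext();
            } catch (RuntimeException inner) {
                e.addSuppressed(inner);
            }
            throw e;
        }
    }

    public static void main(String[] args) {
        Tracker tracker = new Tracker() {
            @Override public String primaryContext() { return "ctx"; }
            @Override public void releasePrimaryContext() { System.out.println("released after failure"); }
        };
        try {
            handOff(tracker, ctx -> { throw new RuntimeException("simulated network failure"); });
        } catch (RuntimeException expected) {
            System.out.println("hand-off failed: " + expected.getMessage());
        }
    }
}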
+ */ + public void releasePrimaryContext() { + globalCheckpointTracker.releasePrimaryContext(); + } + } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 18f025c27c3..db0f27a28ca 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.shard; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexOptions; @@ -33,6 +34,7 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.ThreadInterruptedException; import org.elasticsearch.Assertions; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -44,6 +46,7 @@ import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -57,6 +60,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AsyncIOProcessor; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexNotFoundException; @@ -85,7 +89,9 @@ import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.ShardGetService; import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; @@ -97,6 +103,7 @@ import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.search.stats.ShardSearchStats; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.Store.MetadataSnapshot; @@ -109,6 +116,7 @@ import org.elasticsearch.index.warmer.ShardIndexWarmerService; import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.TypeMissingException; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryFailedException; @@ -144,6 +152,8 @@ import java.util.function.Consumer; import 
java.util.function.Supplier; import java.util.stream.Collectors; +import static org.elasticsearch.index.mapper.SourceToParse.source; + public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard { private final ThreadPool threadPool; @@ -167,7 +177,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl private final IndexEventListener indexEventListener; private final QueryCachingPolicy cachingPolicy; private final Supplier indexSortSupplier; - private final TranslogOpToEngineOpConverter translogOpToEngineOpConverter; /** * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this @@ -260,7 +269,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP); this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays); - this.translogOpToEngineOpConverter = new TranslogOpToEngineOpConverter(shardId, mapperService); // the query cache is a node-level thing, however we want the most popular filters // to be computed on a per-shard basis if (IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.get(settings)) { @@ -276,7 +284,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl searcherWrapper = indexSearcherWrapper; primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id()); refreshListeners = buildRefreshListeners(); - persistMetadata(shardRouting, null); + persistMetadata(path, indexSettings, shardRouting, null, logger); } public Store store() { @@ -335,58 +343,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return this.primaryTerm; } - /** - * Notifies the shard of an increase in the primary term. - * - * @param newPrimaryTerm the new primary term - */ - public void updatePrimaryTerm(final long newPrimaryTerm) { - assert shardRouting.primary() : "primary term can only be explicitly updated on a primary shard"; - synchronized (mutex) { - if (newPrimaryTerm != primaryTerm) { - // Note that due to cluster state batching an initializing primary shard term can failed and re-assigned - // in one state causing it's term to be incremented. Note that if both current shard state and new - // shard state are initializing, we could replace the current shard and reinitialize it. It is however - // possible that this shard is being started. This can happen if: - // 1) Shard is post recovery and sends shard started to the master - // 2) Node gets disconnected and rejoins - // 3) Master assigns the shard back to the node - // 4) Master processes the shard started and starts the shard - // 5) The node process the cluster state where the shard is both started and primary term is incremented. - // - // We could fail the shard in that case, but this will cause it to be removed from the insync allocations list - // potentially preventing re-allocation. 
- assert shardRouting.initializing() == false : - "a started primary shard should never update its term; " - + "shard " + shardRouting + ", " - + "current term [" + primaryTerm + "], " - + "new term [" + newPrimaryTerm + "]"; - assert newPrimaryTerm > primaryTerm : - "primary terms can only go up; current term [" + primaryTerm + "], new term [" + newPrimaryTerm + "]"; - /* - * Before this call returns, we are guaranteed that all future operations are delayed and so this happens before we - * increment the primary term. The latch is needed to ensure that we do not unblock operations before the primary term is - * incremented. - */ - final CountDownLatch latch = new CountDownLatch(1); - indexShardOperationPermits.asyncBlockOperations( - 30, - TimeUnit.MINUTES, - () -> { - latch.await(); - try { - getEngine().fillSeqNoGaps(newPrimaryTerm); - } catch (final AlreadyClosedException e) { - // okay, the index was deleted - } - }, - e -> failShard("exception during primary term transition", e)); - primaryTerm = newPrimaryTerm; - latch.countDown(); - } - } - } - /** * Returns the latest cluster routing entry received with this shard. */ @@ -399,50 +355,29 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return cachingPolicy; } - /** - * Updates the shards routing entry. This mutate the shards internal state depending - * on the changes that get introduced by the new routing value. This method will persist shard level metadata. - * - * @throws IndexShardRelocatedException if shard is marked as relocated and relocation aborted - * @throws IOException if shard state could not be persisted - */ - public void updateRoutingEntry(ShardRouting newRouting) throws IOException { + + @Override + public void updateShardState(final ShardRouting newRouting, + final long newPrimaryTerm, + final CheckedBiConsumer, IOException> primaryReplicaSyncer, + final long applyingClusterStateVersion, + final Set activeAllocationIds, + final Set initializingAllocationIds) throws IOException { final ShardRouting currentRouting; synchronized (mutex) { currentRouting = this.shardRouting; + updateRoutingEntry(newRouting); - if (!newRouting.shardId().equals(shardId())) { - throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId()); - } - if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) { - throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting); - } - if (currentRouting != null && currentRouting.primary() && newRouting.primary() == false) { - throw new IllegalArgumentException("illegal state: trying to move shard from primary mode to replica mode. 
Current " - + currentRouting + ", new " + newRouting); - } + if (shardRouting.primary()) { + updatePrimaryTerm(newPrimaryTerm, primaryReplicaSyncer); - if (state == IndexShardState.POST_RECOVERY && newRouting.active()) { - assert currentRouting.active() == false : "we are in POST_RECOVERY, but our shard routing is active " + currentRouting; - // we want to refresh *before* we move to internal STARTED state - try { - getEngine().refresh("cluster_state_started"); - } catch (Exception e) { - logger.debug("failed to refresh due to move to cluster wide started", e); + final Engine engine = getEngineOrNull(); + // if the engine is not yet started, we are not ready yet and can just ignore this + if (engine != null) { + engine.seqNoService().updateAllocationIdsFromMaster( + applyingClusterStateVersion, activeAllocationIds, initializingAllocationIds); } - changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]"); - } else if (state == IndexShardState.RELOCATED && - (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) { - // if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery - // failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two - // active primaries. - throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state()); } - assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || - state == IndexShardState.CLOSED : - "routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state; - this.shardRouting = newRouting; - persistMetadata(newRouting, currentRouting); } if (currentRouting != null && currentRouting.active() == false && newRouting.active()) { indexEventListener.afterIndexShardStarted(this); @@ -452,6 +387,117 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } } + private void updateRoutingEntry(ShardRouting newRouting) throws IOException { + assert Thread.holdsLock(mutex); + final ShardRouting currentRouting = this.shardRouting; + + if (!newRouting.shardId().equals(shardId())) { + throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId()); + } + if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) { + throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting); + } + if (currentRouting != null && currentRouting.primary() && newRouting.primary() == false) { + throw new IllegalArgumentException("illegal state: trying to move shard from primary mode to replica mode. 
Current " + + currentRouting + ", new " + newRouting); + } + + if (state == IndexShardState.POST_RECOVERY && newRouting.active()) { + assert currentRouting.active() == false : "we are in POST_RECOVERY, but our shard routing is active " + currentRouting; + // we want to refresh *before* we move to internal STARTED state + try { + getEngine().refresh("cluster_state_started"); + } catch (Exception e) { + logger.debug("failed to refresh due to move to cluster wide started", e); + } + changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]"); + } else if (state == IndexShardState.RELOCATED && + (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) { + // if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery + // failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two + // active primaries. + throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state()); + } + assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || + state == IndexShardState.CLOSED : + "routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state; + this.shardRouting = newRouting; + persistMetadata(path, indexSettings, newRouting, currentRouting, logger); + } + + private void updatePrimaryTerm( + final long newPrimaryTerm, final CheckedBiConsumer, IOException> primaryReplicaSyncer) { + assert Thread.holdsLock(mutex); + assert shardRouting.primary() : "primary term can only be explicitly updated on a primary shard"; + if (newPrimaryTerm != primaryTerm) { + /* Note that due to cluster state batching an initializing primary shard term can failed and re-assigned + * in one state causing it's term to be incremented. Note that if both current shard state and new + * shard state are initializing, we could replace the current shard and reinitialize it. It is however + * possible that this shard is being started. This can happen if: + * 1) Shard is post recovery and sends shard started to the master + * 2) Node gets disconnected and rejoins + * 3) Master assigns the shard back to the node + * 4) Master processes the shard started and starts the shard + * 5) The node process the cluster state where the shard is both started and primary term is incremented. + * + * We could fail the shard in that case, but this will cause it to be removed from the insync allocations list + * potentially preventing re-allocation. + */ + assert shardRouting.initializing() == false : + "a started primary shard should never update its term; " + + "shard " + shardRouting + ", " + + "current term [" + primaryTerm + "], " + + "new term [" + newPrimaryTerm + "]"; + assert newPrimaryTerm > primaryTerm : + "primary terms can only go up; current term [" + primaryTerm + "], new term [" + newPrimaryTerm + "]"; + /* + * Before this call returns, we are guaranteed that all future operations are delayed and so this happens before we + * increment the primary term. The latch is needed to ensure that we do not unblock operations before the primary term is + * incremented. 
+ */ + final CountDownLatch latch = new CountDownLatch(1); + // to prevent primary relocation handoff while resync is not completed + boolean resyncStarted = primaryReplicaResyncInProgress.compareAndSet(false, true); + if (resyncStarted == false) { + throw new IllegalStateException("cannot start resync while it's already in progress"); + } + indexShardOperationPermits.asyncBlockOperations( + 30, + TimeUnit.MINUTES, + () -> { + latch.await(); + try { + getEngine().fillSeqNoGaps(newPrimaryTerm); + primaryReplicaSyncer.accept(IndexShard.this, new ActionListener() { + @Override + public void onResponse(ResyncTask resyncTask) { + logger.info("primary-replica resync completed with {} operations", + resyncTask.getResyncedOperations()); + boolean resyncCompleted = primaryReplicaResyncInProgress.compareAndSet(true, false); + assert resyncCompleted : "primary-replica resync finished but was not started"; + } + + @Override + public void onFailure(Exception e) { + boolean resyncCompleted = primaryReplicaResyncInProgress.compareAndSet(true, false); + assert resyncCompleted : "primary-replica resync finished but was not started"; + if (state == IndexShardState.CLOSED) { + // ignore, shutting down + } else { + failShard("exception during primary-replica resync", e); + } + } + }); + } catch (final AlreadyClosedException e) { + // okay, the index was deleted + } + }, + e -> failShard("exception during primary term transition", e)); + primaryTerm = newPrimaryTerm; + latch.countDown(); + } + } + /** * Marks the shard as recovering based on a recovery state, fails with exception is recovering is not allowed to be set. */ @@ -478,27 +524,44 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } } - public void relocated(String reason) throws IllegalIndexShardStateException, InterruptedException { + private final AtomicBoolean primaryReplicaResyncInProgress = new AtomicBoolean(); + + /** + * Completes the relocation. Operations are blocked and current operations are drained before changing state to relocated. The provided + * {@link Runnable} is executed after all operations are successfully blocked. + * + * @param reason the reason for the relocation + * @param consumer a {@link Runnable} that is executed after operations are blocked + * @throws IllegalIndexShardStateException if the shard is not relocating due to concurrent cancellation + * @throws InterruptedException if blocking operations is interrupted + */ + public void relocated( + final String reason, final Consumer consumer) throws IllegalIndexShardStateException, InterruptedException { assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting; try { indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { // no shard operation permits are being held here, move state from started to relocated assert indexShardOperationPermits.getActiveOperationsCount() == 0 : - "in-flight operations in progress while moving shard state to relocated"; - synchronized (mutex) { - if (state != IndexShardState.STARTED) { - throw new IndexShardNotStartedException(shardId, state); + "in-flight operations in progress while moving shard state to relocated"; + /* + * We should not invoke the runnable under the mutex as the expected implementation is to handoff the primary context via a + * network operation. Doing this under the mutex can implicitly block the cluster state update thread on network operations. 
+ */ + verifyRelocatingState(); + final PrimaryContext primaryContext = getEngine().seqNoService().primaryContext(); + try { + consumer.accept(primaryContext); + synchronized (mutex) { + verifyRelocatingState(); + changeState(IndexShardState.RELOCATED, reason); } - // if the master cancelled the recovery, the target will be removed - // and the recovery will stopped. - // However, it is still possible that we concurrently end up here - // and therefore have to protect we don't mark the shard as relocated when - // its shard routing says otherwise. - if (shardRouting.relocating() == false) { - throw new IllegalIndexShardStateException(shardId, IndexShardState.STARTED, - ": shard is no longer relocating " + shardRouting); + } catch (final Exception e) { + try { + getEngine().seqNoService().releasePrimaryContext(); + } catch (final Exception inner) { + e.addSuppressed(inner); } - changeState(IndexShardState.RELOCATED, reason); + throw e; } }); } catch (TimeoutException e) { @@ -510,6 +573,26 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } } + private void verifyRelocatingState() { + if (state != IndexShardState.STARTED) { + throw new IndexShardNotStartedException(shardId, state); + } + /* + * If the master cancelled recovery, the target will be removed and the recovery will be cancelled. However, it is still possible + * that we concurrently end up here and therefore have to protect that we do not mark the shard as relocated when its shard routing + * says otherwise. + */ + + if (shardRouting.relocating() == false) { + throw new IllegalIndexShardStateException(shardId, IndexShardState.STARTED, + ": shard is no longer relocating " + shardRouting); + } + + if (primaryReplicaResyncInProgress.get()) { + throw new IllegalIndexShardStateException(shardId, IndexShardState.STARTED, + ": primary relocation is forbidden while primary-replica resync is in progress " + shardRouting); + } + } public IndexShardState state() { return state; @@ -531,34 +614,47 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return previousState; } - public Engine.Index prepareIndexOnPrimary(SourceToParse source, long version, VersionType versionType, long autoGeneratedIdTimestamp, - boolean isRetry) { + public Engine.IndexResult applyIndexOperationOnPrimary(long version, VersionType versionType, SourceToParse sourceToParse, + long autoGeneratedTimestamp, boolean isRetry, + Consumer onMappingUpdate) throws IOException { + return applyIndexOperation(SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, version, versionType, autoGeneratedTimestamp, + isRetry, Engine.Operation.Origin.PRIMARY, sourceToParse, onMappingUpdate); + } + + public Engine.IndexResult applyIndexOperationOnReplica(long seqNo, long opPrimaryTerm, long version, VersionType versionType, + long autoGeneratedTimeStamp, boolean isRetry, SourceToParse sourceToParse, + Consumer onMappingUpdate) throws IOException { + return applyIndexOperation(seqNo, opPrimaryTerm, version, versionType, autoGeneratedTimeStamp, isRetry, + Engine.Operation.Origin.REPLICA, sourceToParse, onMappingUpdate); + } + + private Engine.IndexResult applyIndexOperation(long seqNo, long opPrimaryTerm, long version, VersionType versionType, + long autoGeneratedTimeStamp, boolean isRetry, Engine.Operation.Origin origin, + SourceToParse sourceToParse, Consumer onMappingUpdate) throws IOException { + assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]"; + 
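The two public entry points introduced here differ only in origin and in who supplies the sequence number and primary term; everything else funnels into the single private apply method. A reduced sketch of that dispatch with placeholder types standing in for the Engine classes:

final class ApplyOperationDispatchSketch {

    enum Origin { PRIMARY, REPLICA }

    static final long UNASSIGNED_SEQ_NO = -2; // stand-in for SequenceNumbersService.UNASSIGNED_SEQ_NO

    private final long primaryTerm = 3;

    String applyOnPrimary(String doc) {
        // The primary does not know the sequence number yet and always uses its own term.
        return apply(UNASSIGNED_SEQ_NO, primaryTerm, Origin.PRIMARY, doc);
    }

    String applyOnReplica(long seqNo, long opPrimaryTerm, String doc) {
        // The replica replays exactly the sequence number and term assigned on the primary.
        return apply(seqNo, opPrimaryTerm, Origin.REPLICA, doc);
    }

    private String apply(long seqNo, long opPrimaryTerm, Origin origin, String doc) {
        assert opPrimaryTerm <= primaryTerm : "op term [" + opPrimaryTerm + "] > shard term [" + primaryTerm + "]";
        return origin + " applied [" + doc + "] at seqNo [" + seqNo + "] with term [" + opPrimaryTerm + "]";
    }

    public static void main(String[] args) {
        ApplyOperationDispatchSketch shard = new ApplyOperationDispatchSketch();
        System.out.println(shard.applyOnPrimary("doc-1"));
        System.out.println(shard.applyOnReplica(11, 3, "doc-1"));
    }
}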
assert versionType.validateVersionForWrites(version); + ensureWriteAllowed(origin); + Engine.Index operation; try { - verifyPrimary(); - return prepareIndex(docMapper(source.type()), source, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, version, versionType, - Engine.Operation.Origin.PRIMARY, autoGeneratedIdTimestamp, isRetry); + operation = prepareIndex(docMapper(sourceToParse.type()), sourceToParse, seqNo, opPrimaryTerm, version, versionType, origin, + autoGeneratedTimeStamp, isRetry); + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + // wrap this in the outer catch block, as the master might also throw a MapperParsingException when updating the mapping + onMappingUpdate.accept(update); + } + } catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) { + return new Engine.IndexResult(e, version, seqNo); } catch (Exception e) { verifyNotClosed(e); throw e; } + + return index(getEngine(), operation); } - public Engine.Index prepareIndexOnReplica(SourceToParse source, long opSeqNo, long opPrimaryTerm, long version, VersionType versionType, - long autoGeneratedIdTimestamp, boolean isRetry) { - try { - verifyReplicationTarget(); - assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]"; - return prepareIndex(docMapper(source.type()), source, opSeqNo, opPrimaryTerm, version, versionType, - Engine.Operation.Origin.REPLICA, autoGeneratedIdTimestamp, isRetry); - } catch (Exception e) { - verifyNotClosed(e); - throw e; - } - } - - static Engine.Index prepareIndex(DocumentMapperForType docMapper, SourceToParse source, long seqNo, long primaryTerm, long version, - VersionType versionType, Engine.Operation.Origin origin, long autoGeneratedIdTimestamp, - boolean isRetry) { + public static Engine.Index prepareIndex(DocumentMapperForType docMapper, SourceToParse source, long seqNo, long primaryTerm, long version, + VersionType versionType, Engine.Operation.Origin origin, long autoGeneratedIdTimestamp, boolean isRetry) { long startTime = System.nanoTime(); ParsedDocument doc = docMapper.getDocumentMapper().parse(source); if (docMapper.getMapping() != null) { @@ -573,43 +669,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return new Engine.Index(uid, doc, seqNo, primaryTerm, version, versionType, origin, startTime, autoGeneratedIdTimestamp, isRetry); } - /** - * Applies an engine operation to the shard, which can be either an index, delete or noop operation. 
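The notable behavioral change in this apply path is that mapping problems no longer escape as exceptions: a dynamic mapping update is handed to the onMappingUpdate consumer, and mapper failures (including ones thrown by that consumer) come back as a failed result. A schematic sketch of the control flow with placeholder types instead of Engine.IndexResult and Mapping:

import java.util.function.Consumer;

final class MappingUpdateFlowSketch {

    static final class Result {
        final String value;
        final Exception failure;

        Result(String value, Exception failure) {
            this.value = value;
            this.failure = failure;
        }
    }

    static Result applyIndex(String source, Consumer<String> onMappingUpdate) {
        try {
            if (source.contains("new-field")) {
                // Stand-in for parsedDoc().dynamicMappingsUpdate(): the consumer may itself throw,
                // for example when the master rejects the mapping, and lands in the same catch block.
                onMappingUpdate.accept("mapping update for [new-field]");
            }
            return new Result("indexed [" + source + "]", null);
        } catch (IllegalArgumentException e) {
            // Mirrors returning a failed Engine.IndexResult instead of propagating the exception.
            return new Result(null, e);
        }
    }

    public static void main(String[] args) {
        System.out.println(applyIndex("plain doc", update -> {}).value);
        Result failed = applyIndex("doc with new-field",
                update -> { throw new IllegalArgumentException("mapping rejected: " + update); });
        System.out.println("failure: " + failed.failure.getMessage());
    }
}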
- */ - public Engine.Result applyOperation(Engine.Operation operation) throws IOException { - return applyOperation(getEngine(), operation); - } - - private Engine.Result applyOperation(Engine engine, Engine.Operation operation) throws IOException { - switch (operation.operationType()) { - case INDEX: - Engine.Index engineIndex = (Engine.Index) operation; - return index(engine, engineIndex); - case DELETE: - final Engine.Delete engineDelete = (Engine.Delete) operation; - return delete(engine, engineDelete); - case NO_OP: - final Engine.NoOp engineNoOp = (Engine.NoOp) operation; - return noOp(engine, engineNoOp); - default: - throw new IllegalStateException("No operation defined for [" + operation + "]"); - } - } - - private Engine.NoOpResult noOp(Engine engine, Engine.NoOp noOp) { - active.set(true); - if (logger.isTraceEnabled()) { - logger.trace("noop (seq# [{}])", noOp.seqNo()); - } - return engine.noOp(noOp); - } - - public Engine.IndexResult index(Engine.Index index) throws IOException { - ensureWriteAllowed(index); - Engine engine = getEngine(); - return index(engine, index); - } - private Engine.IndexResult index(Engine engine, Engine.Index index) throws IOException { active.set(true); final Engine.IndexResult result; @@ -628,32 +687,66 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return result; } - public Engine.NoOp prepareMarkingSeqNoAsNoOpOnReplica(long seqNo, long opPrimaryTerm, String reason) { - verifyReplicationTarget(); - assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]"; - long startTime = System.nanoTime(); - return new Engine.NoOp(seqNo, opPrimaryTerm, Engine.Operation.Origin.REPLICA, startTime, reason); + public Engine.NoOpResult markSeqNoAsNoop(long seqNo, long primaryTerm, String reason) throws IOException { + return markSeqNoAsNoop(seqNo, primaryTerm, reason, Engine.Operation.Origin.REPLICA); } - public Engine.NoOpResult markSeqNoAsNoOp(Engine.NoOp noOp) throws IOException { - ensureWriteAllowed(noOp); - Engine engine = getEngine(); + private Engine.NoOpResult markSeqNoAsNoop(long seqNo, long opPrimaryTerm, String reason, + Engine.Operation.Origin origin) throws IOException { + assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]"; + long startTime = System.nanoTime(); + ensureWriteAllowed(origin); + final Engine.NoOp noOp = new Engine.NoOp(seqNo, opPrimaryTerm, origin, startTime, reason); + return noOp(getEngine(), noOp); + } + + private Engine.NoOpResult noOp(Engine engine, Engine.NoOp noOp) { + active.set(true); + if (logger.isTraceEnabled()) { + logger.trace("noop (seq# [{}])", noOp.seqNo()); + } return engine.noOp(noOp); } - public Engine.Delete prepareDeleteOnPrimary(String type, String id, long version, VersionType versionType) { - verifyPrimary(); - final Term uid = extractUidForDelete(type, id); - return prepareDelete(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, version, - versionType, Engine.Operation.Origin.PRIMARY); + public Engine.DeleteResult applyDeleteOperationOnPrimary(long version, String type, String id, VersionType versionType, + Consumer onMappingUpdate) throws IOException { + return applyDeleteOperation(SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, version, type, id, versionType, + Engine.Operation.Origin.PRIMARY, onMappingUpdate); } - public Engine.Delete prepareDeleteOnReplica(String type, String id, long opSeqNo, long opPrimaryTerm, - long 
version, VersionType versionType) { - verifyReplicationTarget(); + public Engine.DeleteResult applyDeleteOperationOnReplica(long seqNo, long primaryTerm, long version, String type, String id, + VersionType versionType, + Consumer onMappingUpdate) throws IOException { + return applyDeleteOperation(seqNo, primaryTerm, version, type, id, versionType, Engine.Operation.Origin.REPLICA, onMappingUpdate); + } + + private Engine.DeleteResult applyDeleteOperation(long seqNo, long opPrimaryTerm, long version, String type, String id, + VersionType versionType, Engine.Operation.Origin origin, + Consumer onMappingUpdate) throws IOException { assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]"; + assert versionType.validateVersionForWrites(version); + ensureWriteAllowed(origin); + if (indexSettings().isSingleType()) { + // When there is a single type, the unique identifier is only composed of the _id, + // so there is no way to differentiate foo#1 from bar#1. This is especially an issue + // if a user first deletes foo#1 and then indexes bar#1: since we do not encode the + // _type in the uid it might look like we are reindexing the same document, which + // would fail if bar#1 is indexed with a lower version than foo#1 was deleted with. + // In order to work around this issue, we make deletions create types. This way, we + // fail if index and delete operations do not use the same type. + try { + Mapping update = docMapper(type).getMapping(); + if (update != null) { + onMappingUpdate.accept(update); + } + } catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) { + return new Engine.DeleteResult(e, version, seqNo, false); + } + } final Term uid = extractUidForDelete(type, id); - return prepareDelete(type, id, uid, opSeqNo, opPrimaryTerm, version, versionType, Engine.Operation.Origin.REPLICA); + final Engine.Delete delete = prepareDelete(type, id, uid, seqNo, opPrimaryTerm, version, + versionType, origin); + return delete(getEngine(), delete); } private static Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, @@ -662,12 +755,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return new Engine.Delete(type, id, uid, seqNo, primaryTerm, version, versionType, origin, startTime); } - public Engine.DeleteResult delete(Engine.Delete delete) throws IOException { - ensureWriteAllowed(delete); - Engine engine = getEngine(); - return delete(engine, delete); - } - private Term extractUidForDelete(String type, String id) { if (indexSettings.isSingleType()) { // This is only correct because we create types dynamically on delete operations @@ -876,13 +963,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } /** - * Rolls the tranlog generation. - * - * @throws IOException if any file operations on the translog throw an I/O exception + * Rolls the translog generation and cleans up unneeded translog generations. 
*/ - private void rollTranslogGeneration() throws IOException { + private void rollTranslogGeneration() { final Engine engine = getEngine(); - engine.getTranslog().rollGeneration(); + engine.rollTranslogGeneration(); } public void forceMerge(ForceMergeRequest forceMerge) throws IOException { @@ -1053,8 +1138,32 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl assert currentEngineReference.get() == null; } - public Engine.Operation convertToEngineOp(Translog.Operation operation, Engine.Operation.Origin origin) { - return translogOpToEngineOpConverter.convertToEngineOp(operation, origin); + public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine.Operation.Origin origin, + Consumer onMappingUpdate) throws IOException { + final Engine.Result result; + switch (operation.opType()) { + case INDEX: + final Translog.Index index = (Translog.Index) operation; + // we set canHaveDuplicates to true all the time such that we de-optimze the translog case and ensure that all + // autoGeneratedID docs that are coming from the primary are updated correctly. + result = applyIndexOperation(index.seqNo(), index.primaryTerm(), index.version(), + index.versionType().versionTypeForReplicationAndRecovery(), index.getAutoGeneratedIdTimestamp(), true, origin, + source(shardId.getIndexName(), index.type(), index.id(), index.source(), XContentFactory.xContentType(index.source())) + .routing(index.routing()).parent(index.parent()), onMappingUpdate); + break; + case DELETE: + final Translog.Delete delete = (Translog.Delete) operation; + result = applyDeleteOperation(delete.seqNo(), delete.primaryTerm(), delete.version(), delete.type(), delete.id(), + delete.versionType().versionTypeForReplicationAndRecovery(), origin, onMappingUpdate); + break; + case NO_OP: + final Translog.NoOp noOp = (Translog.NoOp) operation; + result = markSeqNoAsNoop(noOp.seqNo(), noOp.primaryTerm(), noOp.reason(), origin); + break; + default: + throw new IllegalStateException("No operation defined for [" + operation + "]"); + } + return result; } // package-private for testing @@ -1066,12 +1175,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl while ((operation = snapshot.next()) != null) { try { logger.trace("[translog] recover op {}", operation); - Engine.Operation engineOp = convertToEngineOp(operation, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY); - applyOperation(engine, engineOp); + Engine.Result result = applyTranslogOperation(operation, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY, update -> { + throw new IllegalArgumentException("unexpected mapping update: " + update); + }); + ExceptionsHelper.reThrowIfNotNull(result.getFailure()); opsRecovered++; recoveryState.getTranslog().incrementRecoveredOperations(); - } catch (ElasticsearchException e) { - if (e.status() == RestStatus.BAD_REQUEST) { + } catch (Exception e) { + if (ExceptionsHelper.status(e) == RestStatus.BAD_REQUEST) { // mainly for MapperParsingException and Failure to detect xcontent logger.info("ignoring recovery of a corrupt translog entry", e); } else { @@ -1227,11 +1338,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } } - private void ensureWriteAllowed(Engine.Operation op) throws IllegalIndexShardStateException { - Engine.Operation.Origin origin = op.origin(); + private void ensureWriteAllowed(Engine.Operation.Origin origin) throws IllegalIndexShardStateException { IndexShardState state = this.state; // one time volatile read if 
(origin == Engine.Operation.Origin.PRIMARY) { + verifyPrimary(); if (writeAllowedStatesForPrimary.contains(state) == false) { throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForPrimary + ", origin [" + origin + "]"); } @@ -1241,6 +1352,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } } else { assert origin == Engine.Operation.Origin.REPLICA; + verifyReplicationTarget(); if (writeAllowedStatesForReplica.contains(state) == false) { throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForReplica + ", origin [" + origin + "]"); } @@ -1249,7 +1361,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl private void verifyPrimary() { if (shardRouting.primary() == false) { - throw new IllegalStateException("shard is not a primary " + shardRouting); + throw new IllegalStateException("shard " + shardRouting + " is not a primary"); } } @@ -1257,8 +1369,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl final IndexShardState state = state(); if (shardRouting.primary() && shardRouting.active() && state != IndexShardState.RELOCATED) { // must use exception that is not ignored by replication logic. See TransportActions.isShardNotAvailableException - throw new IllegalStateException("active primary shard cannot be a replication target before " + - " relocation hand off " + shardRouting + ", state is [" + state + "]"); + throw new IllegalStateException("active primary shard " + shardRouting + " cannot be a replication target before " + + "relocation hand off, state is [" + state + "]"); } } @@ -1533,8 +1645,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl verifyPrimary(); getEngine().seqNoService().markAllocationIdAsInSync(allocationId, localCheckpoint); /* - * We could have blocked waiting for the replica to catch up that we fell idle and there will not be a background sync to the - * replica; mark our self as active to force a future background sync. + * We could have blocked so long waiting for the replica to catch up that we fell idle and there will not be a background sync to + * the replica; mark our self as active to force a future background sync. */ active.compareAndSet(false, true); } @@ -1583,19 +1695,16 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } /** - * Notifies the service of the current allocation IDs in the cluster state. See - * {@link org.elasticsearch.index.seqno.GlobalCheckpointTracker#updateAllocationIdsFromMaster(Set, Set)} - * for details. + * Updates the known allocation IDs and the local checkpoints for the corresponding allocations from a primary relocation source. 
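With the origin passed in directly, the write gate can verify the shard's role as well as its state: primary-origin operations must land on a primary in a writable state, replica-origin operations on a valid replication target. The sketch below reduces the check to those two branches; the concrete state sets are illustrative and not the actual writeAllowedStatesFor* constants.

import java.util.EnumSet;
import java.util.Set;

final class WriteGateSketch {

    enum Origin { PRIMARY, REPLICA }
    enum State { RECOVERING, POST_RECOVERY, STARTED, RELOCATED, CLOSED }

    private static final Set<State> PRIMARY_WRITE_STATES = EnumSet.of(State.STARTED);
    private static final Set<State> REPLICA_WRITE_STATES =
            EnumSet.of(State.RECOVERING, State.POST_RECOVERY, State.STARTED, State.RELOCATED);

    private final boolean primary;
    private final State state;

    WriteGateSketch(boolean primary, State state) {
        this.primary = primary;
        this.state = state;
    }

    void ensureWriteAllowed(Origin origin) {
        if (origin == Origin.PRIMARY) {
            if (primary == false) {
                throw new IllegalStateException("shard is not a primary");
            }
            if (PRIMARY_WRITE_STATES.contains(state) == false) {
                throw new IllegalStateException("primary writes not allowed in state [" + state + "]");
            }
        } else {
            if (primary && state != State.RELOCATED) {
                throw new IllegalStateException("active primary cannot be a replication target before relocation hand off");
            }
            if (REPLICA_WRITE_STATES.contains(state) == false) {
                throw new IllegalStateException("replica writes not allowed in state [" + state + "]");
            }
        }
    }

    public static void main(String[] args) {
        new WriteGateSketch(true, State.STARTED).ensureWriteAllowed(Origin.PRIMARY); // allowed
        try {
            new WriteGateSketch(true, State.STARTED).ensureWriteAllowed(Origin.REPLICA);
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
    }
}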
* - * @param activeAllocationIds the allocation IDs of the currently active shard copies - * @param initializingAllocationIds the allocation IDs of the currently initializing shard copies + * @param primaryContext the sequence number context */ - public void updateAllocationIdsFromMaster(final Set activeAllocationIds, final Set initializingAllocationIds) { + public void updateAllocationIdsFromPrimaryContext(final PrimaryContext primaryContext) { verifyPrimary(); + assert shardRouting.isRelocationTarget() : "only relocation target can update allocation IDs from primary context: " + shardRouting; final Engine engine = getEngineOrNull(); - // if the engine is not yet started, we are not ready yet and can just ignore this if (engine != null) { - engine.seqNoService().updateAllocationIdsFromMaster(activeAllocationIds, initializingAllocationIds); + engine.seqNoService().updateAllocationIdsFromPrimaryContext(primaryContext); } } @@ -1855,11 +1964,16 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return engineFactory.newReadWriteEngine(config); } - // pkg private for testing - void persistMetadata(ShardRouting newRouting, @Nullable ShardRouting currentRouting) throws IOException { + private static void persistMetadata( + final ShardPath shardPath, + final IndexSettings indexSettings, + final ShardRouting newRouting, + final @Nullable ShardRouting currentRouting, + final Logger logger) throws IOException { assert newRouting != null : "newRouting must not be null"; // only persist metadata if routing information that is persisted in shard state metadata actually changed + final ShardId shardId = newRouting.shardId(); if (currentRouting == null || currentRouting.primary() != newRouting.primary() || currentRouting.allocationId().equals(newRouting.allocationId()) == false) { @@ -1871,17 +1985,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl writeReason = "routing changed from " + currentRouting + " to " + newRouting; } logger.trace("{} writing shard state, reason [{}]", shardId, writeReason); - final ShardStateMetaData newShardStateMetadata = new ShardStateMetaData(newRouting.primary(), getIndexUUID(), newRouting.allocationId()); - ShardStateMetaData.FORMAT.write(newShardStateMetadata, shardPath().getShardStatePath()); + final ShardStateMetaData newShardStateMetadata = + new ShardStateMetaData(newRouting.primary(), indexSettings.getUUID(), newRouting.allocationId()); + ShardStateMetaData.FORMAT.write(newShardStateMetadata, shardPath.getShardStatePath()); } else { logger.trace("{} skip writing shard state, has been written before", shardId); } } - private String getIndexUUID() { - return indexSettings.getUUID(); - } - private DocumentMapperForType docMapper(String type) { return mapperService.documentMapperWithAutoCreate(type); } @@ -1919,29 +2030,47 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl * name. 
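Making persistMetadata static forces every dependency through the parameter list and keeps the old rule intact: only write the shard state file when one of the persisted fields, the primary flag or the allocation ID, has actually changed. A small sketch of that change-detection rule; the Routing holder below is a placeholder, not ShardRouting or ShardStateMetaData.

import java.util.Objects;

final class PersistMetadataSketch {

    static final class Routing {
        final boolean primary;
        final String allocationId;

        Routing(boolean primary, String allocationId) {
            this.primary = primary;
            this.allocationId = allocationId;
        }
    }

    static boolean shouldWrite(Routing newRouting, Routing currentRouting) {
        Objects.requireNonNull(newRouting, "newRouting must not be null");
        return currentRouting == null
                || currentRouting.primary != newRouting.primary
                || currentRouting.allocationId.equals(newRouting.allocationId) == false;
    }

    public static void main(String[] args) {
        Routing before = new Routing(true, "alloc-1");
        System.out.println(shouldWrite(before, null));                          // true: first assignment
        System.out.println(shouldWrite(new Routing(true, "alloc-1"), before));  // false: nothing persisted changed
        System.out.println(shouldWrite(new Routing(false, "alloc-1"), before)); // true: primary flag flipped
    }
}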
* * @param operationPrimaryTerm the operation primary term + * @param globalCheckpoint the global checkpoint associated with the request * @param onPermitAcquired the listener for permit acquisition * @param executorOnDelay the name of the executor to invoke the listener on if permit acquisition is delayed */ - public void acquireReplicaOperationPermit( - final long operationPrimaryTerm, final ActionListener onPermitAcquired, final String executorOnDelay) { + public void acquireReplicaOperationPermit(final long operationPrimaryTerm, final long globalCheckpoint, + final ActionListener onPermitAcquired, final String executorOnDelay) { verifyNotClosed(); verifyReplicationTarget(); + final boolean globalCheckpointUpdated; if (operationPrimaryTerm > primaryTerm) { synchronized (primaryTermMutex) { if (operationPrimaryTerm > primaryTerm) { + IndexShardState shardState = state(); + // only roll translog and update primary term if shard has made it past recovery + // Having a new primary term here means that the old primary failed and that there is a new primary, which again + // means that the master will fail this shard as all initializing shards are failed when a primary is selected + // We abort early here to prevent an ongoing recovery from the failed primary to mess with the global / local checkpoint + if (shardState != IndexShardState.POST_RECOVERY && + shardState != IndexShardState.STARTED && + shardState != IndexShardState.RELOCATED) { + throw new IndexShardNotStartedException(shardId, shardState); + } try { indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { assert operationPrimaryTerm > primaryTerm : "shard term already update. op term [" + operationPrimaryTerm + "], shardTerm [" + primaryTerm + "]"; primaryTerm = operationPrimaryTerm; + updateGlobalCheckpointOnReplica(globalCheckpoint); getEngine().getTranslog().rollGeneration(); }); + globalCheckpointUpdated = true; } catch (final Exception e) { onPermitAcquired.onFailure(e); return; } + } else { + globalCheckpointUpdated = false; } } + } else { + globalCheckpointUpdated = false; } assert operationPrimaryTerm <= primaryTerm @@ -1960,6 +2089,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl primaryTerm); onPermitAcquired.onFailure(new IllegalStateException(message)); } else { + if (globalCheckpointUpdated == false) { + try { + updateGlobalCheckpointOnReplica(globalCheckpoint); + } catch (Exception e) { + releasable.close(); + onPermitAcquired.onFailure(e); + return; + } + } onPermitAcquired.onResponse(releasable); } } @@ -2048,7 +2186,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } @Override - protected void doRun() throws Exception { + protected void doRun() throws IOException { flush(new FlushRequest()); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java index c2019e8c52a..f108300b95b 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java +++ b/core/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java @@ -28,6 +28,7 @@ import org.apache.lucene.store.NoLockFactory; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.store.Store; import java.io.Closeable; @@ -60,6 +61,14 @@ final class LocalShardSnapshot 
implements Closeable { return shard.indexSettings().getIndex(); } + long maxSeqNo() { + return shard.getEngine().seqNoService().getMaxSeqNo(); + } + + long maxUnsafeAutoIdTimestamp() { + return Long.parseLong(shard.getEngine().commitStats().getUserData().get(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID)); + } + Directory getSnapshotDirectory() { /* this directory will not be used for anything else but reading / copying files to another directory * we prevent all write operations on this directory with UOE - nobody should close it either. */ diff --git a/core/src/main/java/org/elasticsearch/index/shard/PrimaryContext.java b/core/src/main/java/org/elasticsearch/index/shard/PrimaryContext.java new file mode 100644 index 00000000000..8a067d37181 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/shard/PrimaryContext.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +import com.carrotsearch.hppc.ObjectLongHashMap; +import com.carrotsearch.hppc.ObjectLongMap; +import com.carrotsearch.hppc.cursors.ObjectLongCursor; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * Represents the sequence number component of the primary context. This is the knowledge on the primary of the in-sync and initializing + * shards and their local checkpoints. 
+ */ +public class PrimaryContext implements Writeable { + + private long clusterStateVersion; + + public long clusterStateVersion() { + return clusterStateVersion; + } + + private ObjectLongMap inSyncLocalCheckpoints; + + public ObjectLongMap inSyncLocalCheckpoints() { + return inSyncLocalCheckpoints; + } + + private ObjectLongMap trackingLocalCheckpoints; + + public ObjectLongMap trackingLocalCheckpoints() { + return trackingLocalCheckpoints; + } + + public PrimaryContext( + final long clusterStateVersion, + final ObjectLongMap inSyncLocalCheckpoints, + final ObjectLongMap trackingLocalCheckpoints) { + this.clusterStateVersion = clusterStateVersion; + this.inSyncLocalCheckpoints = inSyncLocalCheckpoints; + this.trackingLocalCheckpoints = trackingLocalCheckpoints; + } + + public PrimaryContext(final StreamInput in) throws IOException { + clusterStateVersion = in.readVLong(); + inSyncLocalCheckpoints = readMap(in); + trackingLocalCheckpoints = readMap(in); + } + + private static ObjectLongMap readMap(final StreamInput in) throws IOException { + final int length = in.readVInt(); + final ObjectLongMap map = new ObjectLongHashMap<>(length); + for (int i = 0; i < length; i++) { + final String key = in.readString(); + final long value = in.readZLong(); + map.addTo(key, value); + } + return map; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeVLong(clusterStateVersion); + writeMap(out, inSyncLocalCheckpoints); + writeMap(out, trackingLocalCheckpoints); + } + + private static void writeMap(final StreamOutput out, final ObjectLongMap map) throws IOException { + out.writeVInt(map.size()); + for (ObjectLongCursor cursor : map) { + out.writeString(cursor.key); + out.writeZLong(cursor.value); + } + } + + @Override + public String toString() { + return "PrimaryContext{" + + "clusterStateVersion=" + clusterStateVersion + + ", inSyncLocalCheckpoints=" + inSyncLocalCheckpoints + + ", trackingLocalCheckpoints=" + trackingLocalCheckpoints + + '}'; + } + +} diff --git a/core/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/core/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java new file mode 100644 index 00000000000..4641675afef --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -0,0 +1,392 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
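A rough, JDK-only sketch of the wire framing used by PrimaryContext above: a cluster state version followed by two maps, each written as a count and then (allocation id, local checkpoint) pairs. The real class goes through StreamOutput/StreamInput with variable-length and zig-zag encodings (writeVLong/writeZLong); plain DataOutputStream primitives are used here only to keep the example self-contained, so the byte layout is not identical.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class PrimaryContextFramingSketch {

    static void writeContext(DataOutputStream out, long clusterStateVersion,
                             Map<String, Long> inSync, Map<String, Long> tracking) throws IOException {
        out.writeLong(clusterStateVersion);
        writeMap(out, inSync);
        writeMap(out, tracking);
    }

    static void writeMap(DataOutputStream out, Map<String, Long> map) throws IOException {
        out.writeInt(map.size());
        for (Map.Entry<String, Long> entry : map.entrySet()) {
            out.writeUTF(entry.getKey());     // allocation id
            out.writeLong(entry.getValue());  // local checkpoint (may be a negative sentinel)
        }
    }

    static Map<String, Long> readMap(DataInputStream in) throws IOException {
        int length = in.readInt();
        Map<String, Long> map = new HashMap<>(length);
        for (int i = 0; i < length; i++) {
            map.put(in.readUTF(), in.readLong());
        }
        return map;
    }

    public static void main(String[] args) throws IOException {
        Map<String, Long> inSync = new LinkedHashMap<>();
        inSync.put("alloc-1", 42L);
        Map<String, Long> tracking = new LinkedHashMap<>();
        tracking.put("alloc-2", -2L);

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeContext(new DataOutputStream(bytes), 7L, inSync, tracking);

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println("clusterStateVersion=" + in.readLong());
        System.out.println("inSync=" + readMap(in) + " tracking=" + readMap(in));
    }
}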
+ */ +package org.elasticsearch.index.shard; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.resync.ResyncReplicationRequest; +import org.elasticsearch.action.resync.ResyncReplicationResponse; +import org.elasticsearch.action.resync.TransportResyncReplicationAction; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static java.util.Objects.requireNonNull; + +public class PrimaryReplicaSyncer extends AbstractComponent { + + private final TaskManager taskManager; + private final SyncAction syncAction; + + public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB); + + private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; + + @Inject + public PrimaryReplicaSyncer(Settings settings, TransportService transportService, TransportResyncReplicationAction syncAction) { + this(settings, transportService.getTaskManager(), syncAction); + } + + // for tests + public PrimaryReplicaSyncer(Settings settings, TaskManager taskManager, SyncAction syncAction) { + super(settings); + this.taskManager = taskManager; + this.syncAction = syncAction; + } + + void setChunkSize(ByteSizeValue chunkSize) { // only settable for tests + if (chunkSize.bytesAsInt() <= 0) { + throw new IllegalArgumentException("chunkSize must be > 0"); + } + this.chunkSize = chunkSize; + } + + public void resync(IndexShard indexShard, ActionListener listener) throws IOException { + try (Translog.View view = indexShard.acquireTranslogView()) { + final long startingSeqNo = indexShard.getGlobalCheckpoint() + 1; + Translog.Snapshot snapshot = view.snapshot(startingSeqNo); + ShardId shardId = indexShard.shardId(); + + // Wrap translog snapshot to make it synchronized as it is accessed by different threads through SnapshotSender. 
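The wrapping described in the comment above, sketched against a stand-in Snapshot interface: calls are serialized with synchronized so that callbacks arriving on different threads never interleave inside the underlying reader, and next() re-checks a shard-state supplier so the resync fails fast once the shard is no longer started. The type names and the state check below are simplified stand-ins for the real translog and shard-state types, not the actual API.

import java.io.IOException;
import java.util.Iterator;
import java.util.function.BooleanSupplier;

public class SynchronizedSnapshotSketch {

    // stand-in for Translog.Operation
    static final class Operation {
        final long seqNo;
        Operation(long seqNo) { this.seqNo = seqNo; }
    }

    // stand-in for Translog.Snapshot
    interface Snapshot {
        int totalOperations();
        Operation next() throws IOException; // null when exhausted
    }

    // serialize access to a non-thread-safe snapshot and abort early when the shard stops being started
    static Snapshot synchronizedSnapshot(Snapshot delegate, BooleanSupplier shardIsStarted) {
        return new Snapshot() {
            @Override
            public synchronized int totalOperations() {
                return delegate.totalOperations();
            }

            @Override
            public synchronized Operation next() throws IOException {
                if (shardIsStarted.getAsBoolean() == false) {
                    throw new IllegalStateException("shard is shutting down, aborting resync");
                }
                return delegate.next();
            }
        };
    }

    public static void main(String[] args) throws IOException {
        Iterator<Operation> ops = java.util.List.of(new Operation(1), new Operation(2)).iterator();
        Snapshot raw = new Snapshot() {
            public int totalOperations() { return 2; }
            public Operation next() { return ops.hasNext() ? ops.next() : null; }
        };
        Snapshot safe = synchronizedSnapshot(raw, () -> true);
        for (Operation op = safe.next(); op != null; op = safe.next()) {
            System.out.println("seqNo=" + op.seqNo);
        }
    }
}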
+ // Even though those calls are not concurrent, snapshot.next() uses non-synchronized state and is not multi-thread-compatible + // Also fail the resync early if the shard is shutting down + Translog.Snapshot wrappedSnapshot = new Translog.Snapshot() { + + @Override + public synchronized int totalOperations() { + return snapshot.totalOperations(); + } + + @Override + public synchronized Translog.Operation next() throws IOException { + if (indexShard.state() != IndexShardState.STARTED) { + assert indexShard.state() != IndexShardState.RELOCATED : "resync should never happen on a relocated shard"; + throw new IndexShardNotStartedException(shardId, indexShard.state()); + } + return snapshot.next(); + } + }; + + resync(shardId, indexShard.routingEntry().allocationId().getId(), wrappedSnapshot, + startingSeqNo, listener); + } + } + + private void resync(final ShardId shardId, final String primaryAllocationId, final Translog.Snapshot snapshot, + long startingSeqNo, ActionListener listener) { + ResyncRequest request = new ResyncRequest(shardId, primaryAllocationId); + ResyncTask resyncTask = (ResyncTask) taskManager.register("transport", "resync", request); // it's not transport :-) + ActionListener wrappedListener = new ActionListener() { + @Override + public void onResponse(Void ignore) { + resyncTask.setPhase("finished"); + taskManager.unregister(resyncTask); + listener.onResponse(resyncTask); + } + + @Override + public void onFailure(Exception e) { + resyncTask.setPhase("finished"); + taskManager.unregister(resyncTask); + listener.onFailure(e); + } + }; + try { + new SnapshotSender(logger, syncAction, resyncTask, shardId, primaryAllocationId, snapshot, chunkSize.bytesAsInt(), + startingSeqNo, wrappedListener).run(); + } catch (Exception e) { + wrappedListener.onFailure(e); + } + } + + public interface SyncAction { + void sync(ResyncReplicationRequest request, Task parentTask, String primaryAllocationId, + ActionListener listener); + } + + static class SnapshotSender extends AbstractRunnable implements ActionListener { + private final Logger logger; + private final SyncAction syncAction; + private final ResyncTask task; // to track progress + private final String primaryAllocationId; + private final ShardId shardId; + private final Translog.Snapshot snapshot; + private final long startingSeqNo; + private final int chunkSizeInBytes; + private final ActionListener listener; + private final AtomicInteger totalSentOps = new AtomicInteger(); + private final AtomicInteger totalSkippedOps = new AtomicInteger(); + private AtomicBoolean closed = new AtomicBoolean(); + + SnapshotSender(Logger logger, SyncAction syncAction, ResyncTask task, ShardId shardId, String primaryAllocationId, + Translog.Snapshot snapshot, int chunkSizeInBytes, long startingSeqNo, ActionListener listener) { + this.logger = logger; + this.syncAction = syncAction; + this.task = task; + this.shardId = shardId; + this.primaryAllocationId = primaryAllocationId; + this.snapshot = snapshot; + this.chunkSizeInBytes = chunkSizeInBytes; + this.startingSeqNo = startingSeqNo; + this.listener = listener; + task.setTotalOperations(snapshot.totalOperations()); + } + + @Override + public void onResponse(ResyncReplicationResponse response) { + run(); + } + + @Override + public void onFailure(Exception e) { + if (closed.compareAndSet(false, true)) { + listener.onFailure(e); + } + } + + @Override + protected void doRun() throws Exception { + long size = 0; + final List operations = new ArrayList<>(); + + task.setPhase("collecting_ops"); + 
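The loop that follows collects operations from the snapshot, skips anything already covered by the starting sequence number, and stops once the batch crosses the configured chunk size before handing it to the sync action; the next batch is drawn from the same snapshot when the response comes back. A reduced sketch of that size-bounded batching, under assumed stand-in names:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class SizeBoundedBatchingSketch {

    // stand-in for a translog operation with an estimated wire size
    static final class Op {
        final long seqNo;
        final int sizeInBytes;
        Op(long seqNo, int sizeInBytes) { this.seqNo = seqNo; this.sizeInBytes = sizeInBytes; }
    }

    /**
     * Drains ops from the iterator, skipping anything below startingSeqNo, and stops once the
     * accumulated estimated size reaches chunkSizeInBytes (the batch may overshoot by one op).
     */
    static List<Op> nextBatch(Iterator<Op> snapshot, long startingSeqNo, int chunkSizeInBytes) {
        List<Op> batch = new ArrayList<>();
        long size = 0;
        while (snapshot.hasNext()) {
            Op op = snapshot.next();
            if (op.seqNo < startingSeqNo) {
                continue; // already covered by the global checkpoint, skip
            }
            batch.add(op);
            size += op.sizeInBytes;
            if (size >= chunkSizeInBytes) {
                break; // send this chunk, resume from the same iterator for the next one
            }
        }
        return batch;
    }

    public static void main(String[] args) {
        List<Op> ops = List.of(new Op(1, 200), new Op(2, 300), new Op(3, 400), new Op(4, 100));
        Iterator<Op> snapshot = ops.iterator();
        List<Op> batch;
        while ((batch = nextBatch(snapshot, 2, 512)).isEmpty() == false) {
            System.out.println("sending batch of " + batch.size() + " ops");
        }
    }
}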
task.setResyncedOperations(totalSentOps.get()); + task.setSkippedOperations(totalSkippedOps.get()); + + Translog.Operation operation; + while ((operation = snapshot.next()) != null) { + final long seqNo = operation.seqNo(); + if (startingSeqNo >= 0 && + (seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO || seqNo < startingSeqNo)) { + totalSkippedOps.incrementAndGet(); + continue; + } + operations.add(operation); + size += operation.estimateSize(); + totalSentOps.incrementAndGet(); + + // check if this request is past bytes threshold, and if so, send it off + if (size >= chunkSizeInBytes) { + break; + } + } + + if (!operations.isEmpty()) { + task.setPhase("sending_ops"); + ResyncReplicationRequest request = new ResyncReplicationRequest(shardId, operations); + logger.trace("{} sending batch of [{}][{}] (total sent: [{}], skipped: [{}])", shardId, operations.size(), + new ByteSizeValue(size), totalSentOps.get(), totalSkippedOps.get()); + syncAction.sync(request, task, primaryAllocationId, this); + } else if (closed.compareAndSet(false, true)) { + logger.trace("{} resync completed (total sent: [{}], skipped: [{}])", shardId, totalSentOps.get(), totalSkippedOps.get()); + listener.onResponse(null); + } + } + } + + public static class ResyncRequest extends ActionRequest { + + private final ShardId shardId; + private final String allocationId; + + public ResyncRequest(ShardId shardId, String allocationId) { + this.shardId = shardId; + this.allocationId = allocationId; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId) { + return new ResyncTask(id, type, action, getDescription(), parentTaskId); + } + + @Override + public String getDescription() { + return toString(); + } + + @Override + public String toString() { + return "ResyncRequest{ " + shardId + ", " + allocationId + " }"; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + public static class ResyncTask extends Task { + private volatile String phase = "starting"; + private volatile int totalOperations; + private volatile int resyncedOperations; + private volatile int skippedOperations; + + public ResyncTask(long id, String type, String action, String description, TaskId parentTaskId) { + super(id, type, action, description, parentTaskId); + } + + /** + * Set the current phase of the task. + */ + public void setPhase(String phase) { + this.phase = phase; + } + + /** + * Get the current phase of the task. 
+ */ + public String getPhase() { + return phase; + } + + /** + * total number of translog operations that were captured by translog snapshot + */ + public int getTotalOperations() { + return totalOperations; + } + + public void setTotalOperations(int totalOperations) { + this.totalOperations = totalOperations; + } + + /** + * number of operations that have been successfully replicated + */ + public int getResyncedOperations() { + return resyncedOperations; + } + + public void setResyncedOperations(int resyncedOperations) { + this.resyncedOperations = resyncedOperations; + } + + /** + * number of translog operations that have been skipped + */ + public int getSkippedOperations() { + return skippedOperations; + } + + public void setSkippedOperations(int skippedOperations) { + this.skippedOperations = skippedOperations; + } + + @Override + public ResyncTask.Status getStatus() { + return new ResyncTask.Status(phase, totalOperations, resyncedOperations, skippedOperations); + } + + public static class Status implements Task.Status { + public static final String NAME = "resync"; + + private final String phase; + private final int totalOperations; + private final int resyncedOperations; + private final int skippedOperations; + + public Status(StreamInput in) throws IOException { + phase = in.readString(); + totalOperations = in.readVInt(); + resyncedOperations = in.readVInt(); + skippedOperations = in.readVInt(); + } + + public Status(String phase, int totalOperations, int resyncedOperations, int skippedOperations) { + this.phase = requireNonNull(phase, "Phase cannot be null"); + this.totalOperations = totalOperations; + this.resyncedOperations = resyncedOperations; + this.skippedOperations = skippedOperations; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("phase", phase); + builder.field("totalOperations", totalOperations); + builder.field("resyncedOperations", resyncedOperations); + builder.field("skippedOperations", skippedOperations); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(phase); + out.writeVLong(totalOperations); + out.writeVLong(resyncedOperations); + out.writeVLong(skippedOperations); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Status status = (Status) o; + + if (totalOperations != status.totalOperations) return false; + if (resyncedOperations != status.resyncedOperations) return false; + if (skippedOperations != status.skippedOperations) return false; + return phase.equals(status.phase); + } + + @Override + public int hashCode() { + int result = phase.hashCode(); + result = 31 * result + totalOperations; + result = 31 * result + resyncedOperations; + result = 31 * result + skippedOperations; + return result; + } + } + } +} diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index b2e94165640..078e8b06d6e 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -40,7 +40,9 @@ import org.elasticsearch.common.unit.ByteSizeValue; import 
org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; @@ -49,6 +51,7 @@ import org.elasticsearch.repositories.Repository; import java.io.IOException; import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -115,9 +118,11 @@ final class StoreRecovery { logger.debug("starting recovery from local shards {}", shards); try { final Directory directory = indexShard.store().directory(); // don't close this directory!! - addIndices(indexShard.recoveryState().getIndex(), directory, indexSort, - shards.stream().map(s -> s.getSnapshotDirectory()) - .collect(Collectors.toList()).toArray(new Directory[shards.size()])); + final Directory[] sources = shards.stream().map(LocalShardSnapshot::getSnapshotDirectory).toArray(Directory[]::new); + final long maxSeqNo = shards.stream().mapToLong(LocalShardSnapshot::maxSeqNo).max().getAsLong(); + final long maxUnsafeAutoIdTimestamp = + shards.stream().mapToLong(LocalShardSnapshot::maxUnsafeAutoIdTimestamp).max().getAsLong(); + addIndices(indexShard.recoveryState().getIndex(), directory, indexSort, sources, maxSeqNo, maxUnsafeAutoIdTimestamp); internalRecoverFromStore(indexShard); // just trigger a merge to do housekeeping on the // copied segments - we will also see them in stats etc. @@ -131,8 +136,14 @@ final class StoreRecovery { return false; } - void addIndices(RecoveryState.Index indexRecoveryStats, Directory target, Sort indexSort, Directory... sources) throws IOException { - target = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); + void addIndices( + final RecoveryState.Index indexRecoveryStats, + final Directory target, + final Sort indexSort, + final Directory[] sources, + final long maxSeqNo, + final long maxUnsafeAutoIdTimestamp) throws IOException { + final Directory hardLinkOrCopyTarget = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); IndexWriterConfig iwc = new IndexWriterConfig(null) .setCommitOnClose(false) // we don't want merges to happen here - we call maybe merge on the engine @@ -143,8 +154,20 @@ final class StoreRecovery { if (indexSort != null) { iwc.setIndexSort(indexSort); } - try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(target, indexRecoveryStats), iwc)) { + try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(hardLinkOrCopyTarget, indexRecoveryStats), iwc)) { writer.addIndexes(sources); + /* + * We set the maximum sequence number and the local checkpoint on the target to the maximum of the maximum sequence numbers on + * the source shards. This ensures that history after this maximum sequence number can advance and we have correct + * document-level semantics. 
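For the commit prepared here, the target's starting point is simply the maximum of the source shards' maximum sequence numbers, with the local checkpoint pinned to the same value. The sketch below computes those maxima and assembles the commit user data as plain strings; the literal key names are assumptions standing in for SequenceNumbers.MAX_SEQ_NO, SequenceNumbers.LOCAL_CHECKPOINT_KEY, and InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID.

import java.util.HashMap;
import java.util.Map;
import java.util.stream.LongStream;

public class LocalShardsCommitDataSketch {

    static Map<String, String> commitUserData(long[] sourceMaxSeqNos, long[] sourceMaxUnsafeAutoIdTimestamps) {
        // take the maximum across all source shards so history on the target can safely continue from there
        long maxSeqNo = LongStream.of(sourceMaxSeqNos).max().getAsLong();
        long maxUnsafeAutoIdTimestamp = LongStream.of(sourceMaxUnsafeAutoIdTimestamps).max().getAsLong();

        Map<String, String> liveCommitData = new HashMap<>(3);
        liveCommitData.put("max_seq_no", Long.toString(maxSeqNo)); // assumed key name
        // local checkpoint is pinned to the same value: everything up to maxSeqNo is treated as processed
        liveCommitData.put("local_checkpoint", Long.toString(maxSeqNo)); // assumed key name
        liveCommitData.put("max_unsafe_auto_id_timestamp", Long.toString(maxUnsafeAutoIdTimestamp)); // assumed key name
        return liveCommitData;
    }

    public static void main(String[] args) {
        System.out.println(commitUserData(new long[] {17, 42, 23}, new long[] {-1, 1000}));
    }
}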
+ */ + writer.setLiveCommitData(() -> { + final HashMap liveCommitData = new HashMap<>(2); + liveCommitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); + liveCommitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo)); + liveCommitData.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp)); + return liveCommitData.entrySet().iterator(); + }); writer.commit(); } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogOpToEngineOpConverter.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogOpToEngineOpConverter.java deleted file mode 100644 index 372e8f4e25a..00000000000 --- a/core/src/main/java/org/elasticsearch/index/shard/TranslogOpToEngineOpConverter.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.shard; - -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.mapper.DocumentMapperForType; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.translog.Translog; - -import static org.elasticsearch.index.mapper.SourceToParse.source; - -/** - * The TranslogOpToEngineOpConverter encapsulates all the logic needed to transform a translog entry into an - * indexing operation including source parsing and field creation from the source. - */ -public class TranslogOpToEngineOpConverter { - private final MapperService mapperService; - private final ShardId shardId; - - protected TranslogOpToEngineOpConverter(ShardId shardId, MapperService mapperService) { - this.shardId = shardId; - this.mapperService = mapperService; - } - - protected DocumentMapperForType docMapper(String type) { - return mapperService.documentMapperWithAutoCreate(type); // protected for testing - } - - public Engine.Operation convertToEngineOp(Translog.Operation operation, Engine.Operation.Origin origin) { - switch (operation.opType()) { - case INDEX: - final Translog.Index index = (Translog.Index) operation; - // we set canHaveDuplicates to true all the time such that we de-optimze the translog case and ensure that all - // autoGeneratedID docs that are coming from the primary are updated correctly. 
- final Engine.Index engineIndex = IndexShard.prepareIndex(docMapper(index.type()), - source(shardId.getIndexName(), index.type(), index.id(), index.source(), XContentFactory.xContentType(index.source())) - .routing(index.routing()).parent(index.parent()), index.seqNo(), index.primaryTerm(), - index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin, - index.getAutoGeneratedIdTimestamp(), true); - return engineIndex; - case DELETE: - final Translog.Delete delete = (Translog.Delete) operation; - final Engine.Delete engineDelete = new Engine.Delete(delete.type(), delete.id(), delete.uid(), delete.seqNo(), - delete.primaryTerm(), delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(), - origin, System.nanoTime()); - return engineDelete; - case NO_OP: - final Translog.NoOp noOp = (Translog.NoOp) operation; - final Engine.NoOp engineNoOp = - new Engine.NoOp(noOp.seqNo(), noOp.primaryTerm(), origin, System.nanoTime(), noOp.reason()); - return engineNoOp; - default: - throw new IllegalStateException("No operation defined for [" + operation + "]"); - } - } -} diff --git a/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index 8291e76c2ac..495f1dc4bdb 100644 --- a/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -34,12 +34,12 @@ import org.elasticsearch.action.termvectors.TermVectorsResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.get.GetField; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.KeywordFieldMapper; @@ -235,9 +235,9 @@ public class TermVectorsService { return selectedFields; } - private static Fields generateTermVectors(IndexShard indexShard, Map source, Collection getFields, boolean withOffsets, @Nullable Map perFieldAnalyzer, Set fields) throws IOException { + private static Fields generateTermVectors(IndexShard indexShard, Map source, Collection getFields, boolean withOffsets, @Nullable Map perFieldAnalyzer, Set fields) throws IOException { Map> values = new HashMap<>(); - for (GetField getField : getFields) { + for (DocumentField getField : getFields) { String field = getField.getName(); if (fields.contains(field)) { // some fields are returned even when not asked for, eg. 
_timestamp values.put(field, getField.getValues()); @@ -279,7 +279,7 @@ public class TermVectorsService { // select the right fields and generate term vectors ParseContext.Document doc = parsedDocument.rootDoc(); Set seenFields = new HashSet<>(); - Collection getFields = new HashSet<>(); + Collection documentFields = new HashSet<>(); for (IndexableField field : doc.getFields()) { MappedFieldType fieldType = indexShard.mapperService().fullName(field.name()); if (!isValidField(fieldType)) { @@ -295,10 +295,10 @@ public class TermVectorsService { seenFields.add(field.name()); } String[] values = doc.getValues(field.name()); - getFields.add(new GetField(field.name(), Arrays.asList((Object[]) values))); + documentFields.add(new DocumentField(field.name(), Arrays.asList((Object[]) values))); } return generateTermVectors(indexShard, XContentHelper.convertToMap(parsedDocument.source(), true, request.xContentType()).v2(), - getFields, request.offsets(), request.perFieldAnalyzer(), seenFields); + documentFields, request.offsets(), request.perFieldAnalyzer(), seenFields); } private static ParsedDocument parseDocument(IndexShard indexShard, String index, String type, BytesReference doc, diff --git a/core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java index 6f392c195fd..7f8b7f3fb2c 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java +++ b/core/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.ByteBufferStreamInput; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.nio.file.Files; import java.nio.file.Path; /** @@ -121,4 +122,8 @@ public abstract class BaseTranslogReader implements Comparable this.indexSettings.getFlushThresholdSize().getBytes(); } @@ -560,6 +594,25 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } + private Stream readersAboveMinSeqNo(long minSeqNo) { + assert readLock.isHeldByCurrentThread() || writeLock.isHeldByCurrentThread() : + "callers of readersAboveMinSeqNo must hold a lock: readLock [" + + readLock.isHeldByCurrentThread() + "], writeLock [" + readLock.isHeldByCurrentThread() + "]"; + return Stream.concat(readers.stream(), Stream.of(current)) + .filter(reader -> { + final long maxSeqNo = reader.getCheckpoint().maxSeqNo; + return maxSeqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO || maxSeqNo >= minSeqNo; + }); + } + + private Snapshot createSnapshotFromMinSeqNo(long minSeqNo) { + try (ReleasableLock ignored = readLock.acquire()) { + ensureOpen(); + Snapshot[] snapshots = readersAboveMinSeqNo(minSeqNo).map(BaseTranslogReader::newSnapshot).toArray(Snapshot[]::new); + return new MultiSnapshot(snapshots); + } + } + /** * Returns a view into the current translog that is guaranteed to retain all current operations * while receiving future ones as well @@ -567,7 +620,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC public Translog.View newView() { try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); - final long viewGen = deletionPolicy.acquireTranslogGenForView(); + final long viewGen = getMinFileGeneration(); + deletionPolicy.acquireTranslogGenForView(viewGen); try { return new View(viewGen); } catch (Exception e) { @@ -674,7 +728,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC public 
TranslogStats stats() { // acquire lock to make the two numbers roughly consistent (no file change half way) try (ReleasableLock lock = readLock.acquire()) { - return new TranslogStats(totalOperations(), sizeInBytes()); + return new TranslogStats(totalOperations(), sizeInBytes(), uncommittedOperations(), uncommittedSizeInBytes()); } } @@ -698,35 +752,36 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC public class View implements Closeable { AtomicBoolean closed = new AtomicBoolean(); - final long minGeneration; + final long viewGenToRelease; - View(long minGeneration) { - this.minGeneration = minGeneration; - } - - /** this smallest translog generation in this view */ - public long minTranslogGeneration() { - return minGeneration; + View(long viewGenToRelease) { + this.viewGenToRelease = viewGenToRelease; } /** - * The total number of operations in the view. + * The total number of operations in the view files which contain an operation with a sequence number + * above the given min sequence numbers. This will be the number of operations in snapshot taken + * by calling {@link #snapshot(long)} with the same parameter. */ - public int totalOperations() { - return Translog.this.totalOperations(minGeneration); + public int estimateTotalOperations(long minSequenceNumber) { + return Translog.this.totalOperationsInGensAboveSeqNo(minSequenceNumber); } /** - * Returns the size in bytes of the files behind the view. + * The total size of the view files which contain an operation with a sequence number + * above the given min sequence numbers. These are the files that would need to be read by snapshot + * acquired {@link #snapshot(long)} with the same parameter. */ - public long sizeInBytes() { - return Translog.this.sizeInBytes(minGeneration); + public long estimateSizeInBytes(long minSequenceNumber) { + return Translog.this.sizeOfGensAboveSeqNoInBytes(minSequenceNumber); } - /** create a snapshot from this view */ - public Snapshot snapshot() { + /** + * create a snapshot from this view, containing all + * operations from the given sequence number and up (with potentially some more) */ + public Snapshot snapshot(long minSequenceNumber) { ensureOpen(); - return Translog.this.newSnapshot(minGeneration); + return Translog.this.createSnapshotFromMinSeqNo(minSequenceNumber); } void ensureOpen() { @@ -738,8 +793,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC @Override public void close() throws IOException { if (closed.getAndSet(true) == false) { - logger.trace("closing view starting at translog [{}]", minGeneration); - deletionPolicy.releaseTranslogGenView(minGeneration); + logger.trace("closing view starting at translog [{}]", viewGenToRelease); + deletionPolicy.releaseTranslogGenView(viewGenToRelease); trimUnreferencedReaders(); closeFilesIfNoPendingViews(); } @@ -1522,7 +1577,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC // we're shutdown potentially on some tragic event, don't delete anything return; } - long minReferencedGen = deletionPolicy.minTranslogGenRequired(); + long minReferencedGen = deletionPolicy.minTranslogGenRequired(readers, current); assert minReferencedGen >= getMinFileGeneration() : "deletion policy requires a minReferenceGen of [" + minReferencedGen + "] but the lowest gen available is [" + getMinFileGeneration() + "]"; @@ -1663,4 +1718,12 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return translogUUID; } + + TranslogWriter 
getCurrent() { + return current; + } + + List getReaders() { + return readers; + } } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java index 84f61a642cc..e1b1147b8cf 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java @@ -21,13 +21,17 @@ package org.elasticsearch.index.translog; import org.apache.lucene.util.Counter; +import java.io.IOException; import java.util.HashMap; +import java.util.List; import java.util.Map; public class TranslogDeletionPolicy { - /** Records how many views are held against each - * translog generation */ + /** + * Records how many views are held against each + * translog generation + */ private final Map translogRefCounts = new HashMap<>(); /** @@ -36,21 +40,37 @@ public class TranslogDeletionPolicy { */ private long minTranslogGenerationForRecovery = 1; + private long retentionSizeInBytes; + + private long retentionAgeInMillis; + + public TranslogDeletionPolicy(long retentionSizeInBytes, long retentionAgeInMillis) { + this.retentionSizeInBytes = retentionSizeInBytes; + this.retentionAgeInMillis = retentionAgeInMillis; + } + public synchronized void setMinTranslogGenerationForRecovery(long newGen) { if (newGen < minTranslogGenerationForRecovery) { throw new IllegalArgumentException("minTranslogGenerationForRecovery can't go backwards. new [" + newGen + "] current [" + - minTranslogGenerationForRecovery+ "]"); + minTranslogGenerationForRecovery + "]"); } minTranslogGenerationForRecovery = newGen; } + public synchronized void setRetentionSizeInBytes(long bytes) { + retentionSizeInBytes = bytes; + } + + public synchronized void setRetentionAgeInMillis(long ageInMillis) { + retentionAgeInMillis = ageInMillis; + } + /** * acquires the basis generation for a new view. Any translog generation above, and including, the returned generation * will not be deleted until a corresponding call to {@link #releaseTranslogGenView(long)} is called. */ - synchronized long acquireTranslogGenForView() { - translogRefCounts.computeIfAbsent(minTranslogGenerationForRecovery, l -> Counter.newCounter(false)).addAndGet(1); - return minTranslogGenerationForRecovery; + synchronized void acquireTranslogGenForView(final long genForView) { + translogRefCounts.computeIfAbsent(genForView, l -> Counter.newCounter(false)).addAndGet(1); } /** returns the number of generations that were acquired for views */ @@ -59,7 +79,7 @@ public class TranslogDeletionPolicy { } /** - * releases a generation that was acquired by {@link #acquireTranslogGenForView()} + * releases a generation that was acquired by {@link #acquireTranslogGenForView(long)} */ synchronized void releaseTranslogGenView(long translogGen) { Counter current = translogRefCounts.get(translogGen); @@ -74,14 +94,68 @@ public class TranslogDeletionPolicy { /** * returns the minimum translog generation that is still required by the system. 
Any generation below * the returned value may be safely deleted + * + * @param readers current translog readers + * @param writer current translog writer */ - synchronized long minTranslogGenRequired() { - long viewRefs = translogRefCounts.keySet().stream().reduce(Math::min).orElse(Long.MAX_VALUE); - return Math.min(viewRefs, minTranslogGenerationForRecovery); + synchronized long minTranslogGenRequired(List readers, TranslogWriter writer) throws IOException { + long minByView = getMinTranslogGenRequiredByViews(); + long minByAge = getMinTranslogGenByAge(readers, writer, retentionAgeInMillis, currentTime()); + long minBySize = getMinTranslogGenBySize(readers, writer, retentionSizeInBytes); + final long minByAgeAndSize; + if (minBySize == Long.MIN_VALUE && minByAge == Long.MIN_VALUE) { + // both size and age are disabled; + minByAgeAndSize = Long.MAX_VALUE; + } else { + minByAgeAndSize = Math.max(minByAge, minBySize); + } + return Math.min(minByAgeAndSize, Math.min(minByView, minTranslogGenerationForRecovery)); + } + + static long getMinTranslogGenBySize(List readers, TranslogWriter writer, long retentionSizeInBytes) { + if (retentionSizeInBytes >= 0) { + long totalSize = writer.sizeInBytes(); + long minGen = writer.getGeneration(); + for (int i = readers.size() - 1; i >= 0 && totalSize < retentionSizeInBytes; i--) { + final TranslogReader reader = readers.get(i); + totalSize += reader.sizeInBytes(); + minGen = reader.getGeneration(); + } + return minGen; + } else { + return Long.MIN_VALUE; + } + } + + static long getMinTranslogGenByAge(List readers, TranslogWriter writer, long maxRetentionAgeInMillis, long now) + throws IOException { + if (maxRetentionAgeInMillis >= 0) { + for (TranslogReader reader: readers) { + if (now - reader.getLastModifiedTime() <= maxRetentionAgeInMillis) { + return reader.getGeneration(); + } + } + return writer.getGeneration(); + } else { + return Long.MIN_VALUE; + } + } + + protected long currentTime() { + return System.currentTimeMillis(); + } + + private long getMinTranslogGenRequiredByViews() { + return translogRefCounts.keySet().stream().reduce(Math::min).orElse(Long.MAX_VALUE); } /** returns the translog generation that will be used as a basis of a future store/peer recovery */ public synchronized long getMinTranslogGenerationForRecovery() { return minTranslogGenerationForRecovery; } + + synchronized long getViewCount(long viewGen) { + final Counter counter = translogRefCounts.get(viewGen); + return counter == null ? 
0 : counter.get(); + } } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java index 9057207501c..46439afead1 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java @@ -116,7 +116,7 @@ public class TranslogReader extends BaseTranslogReader implements Closeable { throw new IllegalStateException("pre-2.0 translog found [" + path + "]"); case TranslogWriter.VERSION_CHECKPOINTS: assert path.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX) : "new file ends with old suffix: " + path; - assert checkpoint.numOps >= 0 : "expected at least 0 operatin but got: " + checkpoint.numOps; + assert checkpoint.numOps >= 0 : "expected at least 0 operation but got: " + checkpoint.numOps; assert checkpoint.offset <= channel.size() : "checkpoint is inconsistent with channel length: " + channel.size() + " " + checkpoint; int len = headerStream.readInt(); if (len > channel.size()) { @@ -130,8 +130,8 @@ public class TranslogReader extends BaseTranslogReader implements Closeable { throw new TranslogCorruptedException("expected shard UUID " + uuidBytes + " but got: " + ref + " this translog file belongs to a different translog. path:" + path); } - final long firstOperationOffset = - ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + Integer.BYTES; + final long firstOperationOffset; + firstOperationOffset = ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + Integer.BYTES; return new TranslogReader(checkpoint, channel, path, firstOperationOffset); default: diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java index 908cf511db0..312b7fc9db0 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java @@ -99,7 +99,7 @@ final class TranslogSnapshot extends BaseTranslogReader implements Translog.Snap return "TranslogSnapshot{" + "readOperations=" + readOperations + ", position=" + position + - ", totalOperations=" + totalOperations + + ", estimateTotalOperations=" + totalOperations + ", length=" + length + ", reusableBuffer=" + reusableBuffer + '}'; diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogStats.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogStats.java index e60fd2086b9..4b7a092a5ec 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogStats.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogStats.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.index.translog; +import org.elasticsearch.Version; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -30,20 +31,29 @@ public class TranslogStats extends ToXContentToBytes implements Streamable { private long translogSizeInBytes; private int numberOfOperations; + private long uncommittedSizeInBytes; + private int uncommittedOperations; public TranslogStats() { } - public TranslogStats(int numberOfOperations, long translogSizeInBytes) { + public TranslogStats(int numberOfOperations, long translogSizeInBytes, int uncommittedOperations, long uncommittedSizeInBytes) { if (numberOfOperations < 0) { throw 
new IllegalArgumentException("numberOfOperations must be >= 0"); } if (translogSizeInBytes < 0) { throw new IllegalArgumentException("translogSizeInBytes must be >= 0"); } - assert translogSizeInBytes >= 0 : "translogSizeInBytes must be >= 0, got [" + translogSizeInBytes + "]"; + if (uncommittedOperations < 0) { + throw new IllegalArgumentException("uncommittedOperations must be >= 0"); + } + if (uncommittedSizeInBytes < 0) { + throw new IllegalArgumentException("uncommittedSizeInBytes must be >= 0"); + } this.numberOfOperations = numberOfOperations; this.translogSizeInBytes = translogSizeInBytes; + this.uncommittedSizeInBytes = uncommittedSizeInBytes; + this.uncommittedOperations = uncommittedOperations; } public void add(TranslogStats translogStats) { @@ -53,41 +63,59 @@ public class TranslogStats extends ToXContentToBytes implements Streamable { this.numberOfOperations += translogStats.numberOfOperations; this.translogSizeInBytes += translogStats.translogSizeInBytes; + this.uncommittedOperations += translogStats.uncommittedOperations; + this.uncommittedSizeInBytes += translogStats.uncommittedSizeInBytes; } public long getTranslogSizeInBytes() { return translogSizeInBytes; } - public long estimatedNumberOfOperations() { + public int estimatedNumberOfOperations() { return numberOfOperations; } + /** the size of the generations in the translog that weren't yet to comitted to lucene */ + public long getUncommittedSizeInBytes() { + return uncommittedSizeInBytes; + } + + /** the number of operations in generations of the translog that weren't yet to comitted to lucene */ + public int getUncommittedOperations() { + return uncommittedOperations; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields.TRANSLOG); - builder.field(Fields.OPERATIONS, numberOfOperations); - builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, translogSizeInBytes); + builder.startObject("translog"); + builder.field("operations", numberOfOperations); + builder.byteSizeField("size_in_bytes", "size", translogSizeInBytes); + builder.field("uncommitted_operations", uncommittedOperations); + builder.byteSizeField("uncommitted_size_in_bytes", "uncommitted_size", uncommittedSizeInBytes); builder.endObject(); return builder; } - static final class Fields { - static final String TRANSLOG = "translog"; - static final String OPERATIONS = "operations"; - static final String SIZE = "size"; - static final String SIZE_IN_BYTES = "size_in_bytes"; - } - @Override public void readFrom(StreamInput in) throws IOException { numberOfOperations = in.readVInt(); translogSizeInBytes = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha3)) { + uncommittedOperations = in.readVInt(); + uncommittedSizeInBytes = in.readVLong(); + } else { + uncommittedOperations = numberOfOperations; + uncommittedSizeInBytes = translogSizeInBytes; + } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(numberOfOperations); out.writeVLong(translogSizeInBytes); + if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha3)) { + out.writeVInt(uncommittedOperations); + out.writeVLong(uncommittedSizeInBytes); + } } } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index d637c9da79f..92851117093 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ 
b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -88,6 +88,9 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { final ByteSizeValue bufferSize, final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier) throws IOException { super(initialCheckpoint.generation, channel, path, channel.position()); + assert initialCheckpoint.offset == channel.position() : + "initial checkpoint offset [" + initialCheckpoint.offset + "] is different than current channel poistion [" + + channel.position() + "]"; this.shardId = shardId; this.channelFactory = channelFactory; this.minTranslogGenerationSupplier = minTranslogGenerationSupplier; @@ -116,18 +119,12 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { out.writeBytes(ref.bytes, ref.offset, ref.length); } - public static TranslogWriter create( - ShardId shardId, - String translogUUID, - long fileGeneration, - Path file, - ChannelFactory channelFactory, - ByteSizeValue bufferSize, - final LongSupplier globalCheckpointSupplier, - final long initialMinTranslogGen, - final LongSupplier minTranslogGenerationSupplier) throws IOException { + public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory, + ByteSizeValue bufferSize, final LongSupplier globalCheckpointSupplier, + final long initialMinTranslogGen, final LongSupplier minTranslogGenerationSupplier) + throws IOException { final BytesRef ref = new BytesRef(translogUUID); - final int headerLength = getHeaderLength(ref.length); + final int firstOperationOffset = getHeaderLength(ref.length); final FileChannel channel = channelFactory.open(file); try { // This OutputStreamDataOutput is intentionally not closed because @@ -135,12 +132,11 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { final OutputStreamDataOutput out = new OutputStreamDataOutput(java.nio.channels.Channels.newOutputStream(channel)); writeHeader(out, ref); channel.force(true); - final Checkpoint checkpoint = - Checkpoint.emptyTranslogCheckpoint(headerLength, fileGeneration, globalCheckpointSupplier.getAsLong(), - initialMinTranslogGen); + final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(firstOperationOffset, fileGeneration, + globalCheckpointSupplier.getAsLong(), initialMinTranslogGen); writeCheckpoint(channelFactory, file.getParent(), checkpoint); - return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize, globalCheckpointSupplier, - minTranslogGenerationSupplier); + return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize, + globalCheckpointSupplier, minTranslogGenerationSupplier); } catch (Exception exception) { // if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that // file exists we remove it. 
We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition @@ -259,8 +255,9 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { } @Override - Checkpoint getCheckpoint() { - return getLastSyncedCheckpoint(); + synchronized Checkpoint getCheckpoint() { + return new Checkpoint(totalOffset, operationCounter, generation, minSeqNo, maxSeqNo, + globalCheckpointSupplier.getAsLong(), minTranslogGenerationSupplier.getAsLong()); } @Override @@ -333,22 +330,12 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { if (lastSyncedCheckpoint.offset < offset && syncNeeded()) { // double checked locking - we don't want to fsync unless we have to and now that we have // the lock we should check again since if this code is busy we might have fsynced enough already - final long offsetToSync; - final int opsCounter; - final long currentMinSeqNo; - final long currentMaxSeqNo; - final long currentGlobalCheckpoint; - final long currentMinTranslogGeneration; + final Checkpoint checkpointToSync; synchronized (this) { ensureOpen(); try { outputStream.flush(); - offsetToSync = totalOffset; - opsCounter = operationCounter; - currentMinSeqNo = minSeqNo; - currentMaxSeqNo = maxSeqNo; - currentGlobalCheckpoint = globalCheckpointSupplier.getAsLong(); - currentMinTranslogGeneration = minTranslogGenerationSupplier.getAsLong(); + checkpointToSync = getCheckpoint(); } catch (Exception ex) { try { closeWithTragicEvent(ex); @@ -360,12 +347,9 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { } // now do the actual fsync outside of the synchronized block such that // we can continue writing to the buffer etc. - final Checkpoint checkpoint; try { channel.force(false); - checkpoint = - writeCheckpoint(channelFactory, offsetToSync, opsCounter, currentMinSeqNo, currentMaxSeqNo, - currentGlobalCheckpoint, currentMinTranslogGeneration, path.getParent(), generation); + writeCheckpoint(channelFactory, path.getParent(), checkpointToSync); } catch (Exception ex) { try { closeWithTragicEvent(ex); @@ -374,9 +358,9 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { } throw ex; } - assert lastSyncedCheckpoint.offset <= offsetToSync : - "illegal state: " + lastSyncedCheckpoint.offset + " <= " + offsetToSync; - lastSyncedCheckpoint = checkpoint; // write protected by syncLock + assert lastSyncedCheckpoint.offset <= checkpointToSync.offset : + "illegal state: " + lastSyncedCheckpoint.offset + " <= " + checkpointToSync.offset; + lastSyncedCheckpoint = checkpointToSync; // write protected by syncLock return true; } } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index e8a36c6006c..bbcc508dbd5 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -22,6 +22,8 @@ package org.elasticsearch.indices; import org.elasticsearch.action.admin.indices.rollover.Condition; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; +import org.elasticsearch.action.resync.TransportResyncReplicationAction; +import org.elasticsearch.index.shard.PrimaryReplicaSyncer; import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.inject.AbstractModule; import 
org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; @@ -165,6 +167,8 @@ public class IndicesModule extends AbstractModule { bind(SyncedFlushService.class).asEagerSingleton(); bind(TransportNodesListShardStoreMetaData.class).asEagerSingleton(); bind(GlobalCheckpointSyncAction.class).asEagerSingleton(); + bind(TransportResyncReplicationAction.class).asEagerSingleton(); + bind(PrimaryReplicaSyncer.class).asEagerSingleton(); } /** diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index e486aede53f..eb138a04588 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -22,6 +22,7 @@ package org.elasticsearch.indices; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.IOUtils; @@ -292,35 +293,49 @@ public class IndicesService extends AbstractLifecycleComponent } } - Map> statsByShard = new HashMap<>(); - for (IndexService indexService : this) { - for (IndexShard indexShard : indexService) { + return new NodeIndicesStats(oldStats, statsByShard(this, flags)); + } + + Map> statsByShard(final IndicesService indicesService, final CommonStatsFlags flags) { + final Map> statsByShard = new HashMap<>(); + + for (final IndexService indexService : indicesService) { + for (final IndexShard indexShard : indexService) { try { - if (indexShard.routingEntry() == null) { + final IndexShardStats indexShardStats = indicesService.indexShardStats(indicesService, indexShard, flags); + + if (indexShardStats == null) { continue; } - IndexShardStats indexShardStats = - new IndexShardStats(indexShard.shardId(), - new ShardStats[]{ - new ShardStats( - indexShard.routingEntry(), - indexShard.shardPath(), - new CommonStats(indicesQueryCache, indexShard, flags), - indexShard.commitStats(), - indexShard.seqNoStats())}); - if (!statsByShard.containsKey(indexService.index())) { + if (statsByShard.containsKey(indexService.index()) == false) { statsByShard.put(indexService.index(), arrayAsArrayList(indexShardStats)); } else { statsByShard.get(indexService.index()).add(indexShardStats); } - } catch (IllegalIndexShardStateException e) { + } catch (IllegalIndexShardStateException | AlreadyClosedException e) { // we can safely ignore illegal state on ones that are closing for example logger.trace((Supplier) () -> new ParameterizedMessage("{} ignoring shard stats", indexShard.shardId()), e); } } } - return new NodeIndicesStats(oldStats, statsByShard); + + return statsByShard; + } + + IndexShardStats indexShardStats(final IndicesService indicesService, final IndexShard indexShard, final CommonStatsFlags flags) { + if (indexShard.routingEntry() == null) { + return null; + } + + return new IndexShardStats(indexShard.shardId(), + new ShardStats[] { + new ShardStats(indexShard.routingEntry(), + indexShard.shardPath(), + new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), + indexShard.commitStats(), + indexShard.seqNoStats()) + }); } /** diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index 3f26b722f41..2657c9f7981 100644 
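The statsByShard refactor in IndicesService above tolerates shards that are concurrently closing by skipping them instead of failing the whole node-level stats call (it now also swallows AlreadyClosedException, not just IllegalIndexShardStateException). A stripped-down sketch of that pattern with placeholder shard and stats types; the exception and record types are purely illustrative.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

public class ShardStatsCollectionSketch {

    // stand-in for the "shard is closing" exceptions that are safe to ignore
    static final class ShardClosedException extends RuntimeException {
        ShardClosedException(String shardId) { super(shardId + " is closed"); }
    }

    // stand-ins for IndexShard and IndexShardStats
    record Shard(String id, boolean closing) {}
    record ShardStats(String shardId) {}

    static List<ShardStats> statsByShard(List<Shard> shards, Function<Shard, ShardStats> statsFor) {
        List<ShardStats> stats = new ArrayList<>();
        for (Shard shard : shards) {
            try {
                stats.add(statsFor.apply(shard));
            } catch (ShardClosedException e) {
                // a shard that is closing while we iterate is expected; skip it rather than failing the request
            }
        }
        return stats;
    }

    public static void main(String[] args) {
        List<Shard> shards = List.of(new Shard("s0", false), new Shard("s1", true), new Shard("s2", false));
        Function<Shard, ShardStats> statsFor = shard -> {
            if (shard.closing()) {
                throw new ShardClosedException(shard.id());
            }
            return new ShardStats(shard.id());
        };
        System.out.println(statsByShard(shards, statsFor)); // s1 is skipped
    }
}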
--- a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -54,14 +54,11 @@ import org.elasticsearch.index.analysis.DecimalDigitFilterFactory; import org.elasticsearch.index.analysis.DelimitedPayloadTokenFilterFactory; import org.elasticsearch.index.analysis.DutchAnalyzerProvider; import org.elasticsearch.index.analysis.DutchStemTokenFilterFactory; -import org.elasticsearch.index.analysis.EdgeNGramTokenFilterFactory; import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory; -import org.elasticsearch.index.analysis.ElisionTokenFilterFactory; import org.elasticsearch.index.analysis.EnglishAnalyzerProvider; import org.elasticsearch.index.analysis.FingerprintAnalyzerProvider; import org.elasticsearch.index.analysis.FingerprintTokenFilterFactory; import org.elasticsearch.index.analysis.FinnishAnalyzerProvider; -import org.elasticsearch.index.analysis.FlattenGraphTokenFilterFactory; import org.elasticsearch.index.analysis.FrenchAnalyzerProvider; import org.elasticsearch.index.analysis.FrenchStemTokenFilterFactory; import org.elasticsearch.index.analysis.GalicianAnalyzerProvider; @@ -77,20 +74,16 @@ import org.elasticsearch.index.analysis.IndicNormalizationFilterFactory; import org.elasticsearch.index.analysis.IndonesianAnalyzerProvider; import org.elasticsearch.index.analysis.IrishAnalyzerProvider; import org.elasticsearch.index.analysis.ItalianAnalyzerProvider; -import org.elasticsearch.index.analysis.KStemTokenFilterFactory; import org.elasticsearch.index.analysis.KeepTypesFilterFactory; import org.elasticsearch.index.analysis.KeepWordFilterFactory; import org.elasticsearch.index.analysis.KeywordAnalyzerProvider; import org.elasticsearch.index.analysis.KeywordTokenizerFactory; import org.elasticsearch.index.analysis.LatvianAnalyzerProvider; -import org.elasticsearch.index.analysis.LengthTokenFilterFactory; import org.elasticsearch.index.analysis.LetterTokenizerFactory; import org.elasticsearch.index.analysis.LimitTokenCountFilterFactory; import org.elasticsearch.index.analysis.LithuanianAnalyzerProvider; -import org.elasticsearch.index.analysis.LowerCaseTokenFilterFactory; import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory; import org.elasticsearch.index.analysis.MinHashTokenFilterFactory; -import org.elasticsearch.index.analysis.NGramTokenFilterFactory; import org.elasticsearch.index.analysis.NGramTokenizerFactory; import org.elasticsearch.index.analysis.NorwegianAnalyzerProvider; import org.elasticsearch.index.analysis.PathHierarchyTokenizerFactory; @@ -104,7 +97,6 @@ import org.elasticsearch.index.analysis.PortugueseAnalyzerProvider; import org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenizer; -import org.elasticsearch.index.analysis.ReverseTokenFilterFactory; import org.elasticsearch.index.analysis.RomanianAnalyzerProvider; import org.elasticsearch.index.analysis.RussianAnalyzerProvider; import org.elasticsearch.index.analysis.RussianStemTokenFilterFactory; @@ -121,8 +113,6 @@ import org.elasticsearch.index.analysis.StandardAnalyzerProvider; import org.elasticsearch.index.analysis.StandardHtmlStripAnalyzerProvider; import org.elasticsearch.index.analysis.StandardTokenFilterFactory; import org.elasticsearch.index.analysis.StandardTokenizerFactory; -import org.elasticsearch.index.analysis.StemmerOverrideTokenFilterFactory; -import 
org.elasticsearch.index.analysis.StemmerTokenFilterFactory; import org.elasticsearch.index.analysis.StopAnalyzerProvider; import org.elasticsearch.index.analysis.StopTokenFilterFactory; import org.elasticsearch.index.analysis.SwedishAnalyzerProvider; @@ -130,15 +120,10 @@ import org.elasticsearch.index.analysis.ThaiAnalyzerProvider; import org.elasticsearch.index.analysis.ThaiTokenizerFactory; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.analysis.TokenizerFactory; -import org.elasticsearch.index.analysis.TruncateTokenFilterFactory; import org.elasticsearch.index.analysis.TurkishAnalyzerProvider; import org.elasticsearch.index.analysis.UAX29URLEmailTokenizerFactory; -import org.elasticsearch.index.analysis.UniqueTokenFilterFactory; -import org.elasticsearch.index.analysis.UpperCaseTokenFilterFactory; import org.elasticsearch.index.analysis.WhitespaceAnalyzerProvider; import org.elasticsearch.index.analysis.WhitespaceTokenizerFactory; -import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory; -import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory; import org.elasticsearch.plugins.AnalysisPlugin; import java.io.IOException; @@ -208,32 +193,16 @@ public final class AnalysisModule { hunspellService) { NamedRegistry> tokenFilters = new NamedRegistry<>("token_filter"); tokenFilters.register("stop", StopTokenFilterFactory::new); - tokenFilters.register("reverse", ReverseTokenFilterFactory::new); - tokenFilters.register("length", LengthTokenFilterFactory::new); - tokenFilters.register("lowercase", LowerCaseTokenFilterFactory::new); - tokenFilters.register("uppercase", UpperCaseTokenFilterFactory::new); - tokenFilters.register("kstem", KStemTokenFilterFactory::new); tokenFilters.register("standard", StandardTokenFilterFactory::new); - tokenFilters.register("nGram", NGramTokenFilterFactory::new); - tokenFilters.register("ngram", NGramTokenFilterFactory::new); - tokenFilters.register("edgeNGram", EdgeNGramTokenFilterFactory::new); - tokenFilters.register("edge_ngram", EdgeNGramTokenFilterFactory::new); tokenFilters.register("shingle", ShingleTokenFilterFactory::new); tokenFilters.register("min_hash", MinHashTokenFilterFactory::new); - tokenFilters.register("unique", UniqueTokenFilterFactory::new); - tokenFilters.register("truncate", requriesAnalysisSettings(TruncateTokenFilterFactory::new)); tokenFilters.register("limit", LimitTokenCountFilterFactory::new); tokenFilters.register("common_grams", requriesAnalysisSettings(CommonGramsTokenFilterFactory::new)); - tokenFilters.register("stemmer", StemmerTokenFilterFactory::new); tokenFilters.register("delimited_payload_filter", DelimitedPayloadTokenFilterFactory::new); - tokenFilters.register("elision", ElisionTokenFilterFactory::new); - tokenFilters.register("flatten_graph", FlattenGraphTokenFilterFactory::new); tokenFilters.register("keep", requriesAnalysisSettings(KeepWordFilterFactory::new)); tokenFilters.register("keep_types", requriesAnalysisSettings(KeepTypesFilterFactory::new)); tokenFilters.register("pattern_capture", requriesAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new)); tokenFilters.register("pattern_replace", requriesAnalysisSettings(PatternReplaceTokenFilterFactory::new)); - tokenFilters.register("dictionary_decompounder", requriesAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new)); - tokenFilters.register("hyphenation_decompounder", 
requriesAnalysisSettings(HyphenationCompoundWordTokenFilterFactory::new)); tokenFilters.register("arabic_stem", ArabicStemTokenFilterFactory::new); tokenFilters.register("brazilian_stem", BrazilianStemTokenFilterFactory::new); tokenFilters.register("czech_stem", CzechStemTokenFilterFactory::new); @@ -241,7 +210,6 @@ public final class AnalysisModule { tokenFilters.register("french_stem", FrenchStemTokenFilterFactory::new); tokenFilters.register("german_stem", GermanStemTokenFilterFactory::new); tokenFilters.register("russian_stem", RussianStemTokenFilterFactory::new); - tokenFilters.register("stemmer_override", requriesAnalysisSettings(StemmerOverrideTokenFilterFactory::new)); tokenFilters.register("arabic_normalization", ArabicNormalizationFilterFactory::new); tokenFilters.register("german_normalization", GermanNormalizationFilterFactory::new); tokenFilters.register("hindi_normalization", HindiNormalizationFilterFactory::new); diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java index 9cc9ed1ea23..23e5e679511 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.analysis; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.core.LetterTokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; @@ -32,10 +31,7 @@ import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer; import org.apache.lucene.analysis.th.ThaiTokenizer; import org.elasticsearch.Version; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.index.analysis.CustomNormalizerProvider; -import org.elasticsearch.index.analysis.MultiTermAwareComponent; import org.elasticsearch.index.analysis.TokenFilterFactory; -import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; public enum PreBuiltTokenizers { @@ -68,13 +64,6 @@ public enum PreBuiltTokenizers { } }, - KEYWORD(CachingStrategy.ONE) { - @Override - protected Tokenizer create(Version version) { - return new KeywordTokenizer(); - } - }, - LETTER(CachingStrategy.ONE) { @Override protected Tokenizer create(Version version) { @@ -125,50 +114,13 @@ public enum PreBuiltTokenizers { return null; } - protected final PreBuiltCacheFactory.PreBuiltCache cache; private final CachingStrategy cachingStrategy; PreBuiltTokenizers(CachingStrategy cachingStrategy) { this.cachingStrategy = cachingStrategy; - cache = PreBuiltCacheFactory.getCache(cachingStrategy); } public CachingStrategy getCachingStrategy() { return cachingStrategy; } - - private interface MultiTermAwareTokenizerFactory extends TokenizerFactory, MultiTermAwareComponent {} - - /** - * Old style resolution for {@link TokenizerFactory}. Exists entirely to keep - * {@link CustomNormalizerProvider#build(java.util.Map, java.util.Map)} working during the migration. 
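The AnalysisModule and PreBuiltTokenizers hunks above only drop registrations (reverse, lowercase, ngram, the keyword tokenizer, and so on) from core; the factories are meant to be supplied by an analysis plugin instead. A rough sketch of how a plugin re-registers token filters through AnalysisPlugin#getTokenFilters follows; it assumes the factory classes remain available to the plugin, and the plugin class and exact filter set are illustrative, not part of this patch.

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.index.analysis.LowerCaseTokenFilterFactory;
import org.elasticsearch.index.analysis.ReverseTokenFilterFactory;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.plugins.Plugin;

public class ExampleAnalysisPlugin extends Plugin implements AnalysisPlugin {

    @Override
    public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
        final Map<String, AnalysisProvider<TokenFilterFactory>> filters = new HashMap<>();
        // The factory constructors take (IndexSettings, Environment, String, Settings), which is what
        // AnalysisProvider#get expects, so method references work just as they did in the
        // registrations removed from AnalysisModule above.
        filters.put("reverse", ReverseTokenFilterFactory::new);
        filters.put("lowercase", LowerCaseTokenFilterFactory::new);
        return filters;
    }
}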
- */ - public synchronized TokenizerFactory getTokenizerFactory(final Version version) { - TokenizerFactory tokenizerFactory = cache.get(version); - if (tokenizerFactory == null) { - if (getMultiTermComponent(version) != null) { - tokenizerFactory = new MultiTermAwareTokenizerFactory() { - @Override - public Tokenizer create() { - return PreBuiltTokenizers.this.create(version); - } - - @Override - public Object getMultiTermComponent() { - return PreBuiltTokenizers.this.getMultiTermComponent(version); - } - }; - } else { - tokenizerFactory = new TokenizerFactory() { - @Override - public Tokenizer create() { - return PreBuiltTokenizers.this.create(version); - } - }; - } - cache.put(version, tokenizerFactory); - } - - return tokenizerFactory; - } } diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 9d091429f22..97f57e216c0 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -25,6 +25,7 @@ import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateApplier; @@ -40,6 +41,7 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -59,6 +61,8 @@ import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardRelocatedException; import org.elasticsearch.index.shard.IndexShardState; +import org.elasticsearch.index.shard.PrimaryReplicaSyncer; +import org.elasticsearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.IndicesService; @@ -112,6 +116,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple private final boolean sendRefreshMapping; private final List buildInIndexListener; + private final PrimaryReplicaSyncer primaryReplicaSyncer; @Inject public IndicesClusterStateService(Settings settings, IndicesService indicesService, ClusterService clusterService, @@ -121,11 +126,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple RepositoriesService repositoriesService, SearchService searchService, SyncedFlushService syncedFlushService, PeerRecoverySourceService peerRecoverySourceService, SnapshotShardsService snapshotShardsService, - GlobalCheckpointSyncAction globalCheckpointSyncAction) { + GlobalCheckpointSyncAction globalCheckpointSyncAction, + PrimaryReplicaSyncer primaryReplicaSyncer) { this(settings, (AllocatedIndices>) indicesService, clusterService, threadPool, recoveryTargetService, shardStateAction, nodeMappingRefreshAction, 
repositoriesService, searchService, syncedFlushService, peerRecoverySourceService, - snapshotShardsService, globalCheckpointSyncAction); + snapshotShardsService, globalCheckpointSyncAction, primaryReplicaSyncer); } // for tests @@ -138,7 +144,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple RepositoriesService repositoriesService, SearchService searchService, SyncedFlushService syncedFlushService, PeerRecoverySourceService peerRecoverySourceService, SnapshotShardsService snapshotShardsService, - GlobalCheckpointSyncAction globalCheckpointSyncAction) { + GlobalCheckpointSyncAction globalCheckpointSyncAction, + PrimaryReplicaSyncer primaryReplicaSyncer) { super(settings); this.buildInIndexListener = Arrays.asList( @@ -155,6 +162,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple this.shardStateAction = shardStateAction; this.nodeMappingRefreshAction = nodeMappingRefreshAction; this.repositoriesService = repositoriesService; + this.primaryReplicaSyncer = primaryReplicaSyncer; this.sendRefreshMapping = this.settings.getAsBoolean("indices.cluster.send_refresh_mapping", true); } @@ -549,20 +557,17 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple + "cluster state: " + shardRouting + " local: " + currentRoutingEntry; try { - shard.updateRoutingEntry(shardRouting); - if (shardRouting.primary()) { - final IndexShardRoutingTable indexShardRoutingTable = routingTable.shardRoutingTable(shardRouting.shardId()); - /* - * Filter to shards that track sequence numbers and should be taken into consideration for checkpoint tracking. Shards on - * old nodes will go through a file-based recovery which will also transfer sequence number information. - */ - final Set activeIds = - allocationIdsForShardsOnNodesThatUnderstandSeqNos(indexShardRoutingTable.activeShards(), nodes); - final Set initializingIds = - allocationIdsForShardsOnNodesThatUnderstandSeqNos(indexShardRoutingTable.getAllInitializingShards(), nodes); - shard.updatePrimaryTerm(clusterState.metaData().index(shard.shardId().getIndex()).primaryTerm(shard.shardId().id())); - shard.updateAllocationIdsFromMaster(activeIds, initializingIds); - } + final long primaryTerm = clusterState.metaData().index(shard.shardId().getIndex()).primaryTerm(shard.shardId().id()); + final IndexShardRoutingTable indexShardRoutingTable = routingTable.shardRoutingTable(shardRouting.shardId()); + /* + * Filter to shards that track sequence numbers and should be taken into consideration for checkpoint tracking. Shards on old + * nodes will go through a file-based recovery which will also transfer sequence number information. + */ + final Set activeIds = allocationIdsForShardsOnNodesThatUnderstandSeqNos(indexShardRoutingTable.activeShards(), nodes); + final Set initializingIds = + allocationIdsForShardsOnNodesThatUnderstandSeqNos(indexShardRoutingTable.getAllInitializingShards(), nodes); + shard.updateShardState( + shardRouting, primaryTerm, primaryReplicaSyncer::resync, clusterState.version(), activeIds, initializingIds); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed updating shard routing entry", e, clusterState); return; @@ -729,29 +734,27 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple RecoveryState recoveryState(); /** - * Updates the shards routing entry. This mutate the shards internal state depending - * on the changes that get introduced by the new routing value. 
This method will persist shard level metadata. + * Updates the shard state based on an incoming cluster state: + * - Updates and persists the new routing value. + * - Updates the primary term if this shard is a primary. + * - Updates the allocation ids that are tracked by the shard if it is a primary. + * See {@link GlobalCheckpointTracker#updateAllocationIdsFromMaster(long, Set, Set)} for details. * + * @param shardRouting the new routing entry + * @param primaryTerm the new primary term + * @param primaryReplicaSyncer the primary-replica resync action to trigger when a term is increased on a primary + * @param applyingClusterStateVersion the cluster state version being applied when updating the allocation IDs from the master + * @param activeAllocationIds the allocation ids of the currently active shard copies + * @param initializingAllocationIds the allocation ids of the currently initializing shard copies * @throws IndexShardRelocatedException if shard is marked as relocated and relocation aborted * @throws IOException if shard state could not be persisted */ - void updateRoutingEntry(ShardRouting shardRouting) throws IOException; - - /** - * Update the primary term. This method should only be invoked on primary shards. - * - * @param primaryTerm the new primary term - */ - void updatePrimaryTerm(long primaryTerm); - - /** - * Notifies the service of the current allocation ids in the cluster state. - * See {@link GlobalCheckpointTracker#updateAllocationIdsFromMaster(Set, Set)} for details. - * - * @param activeAllocationIds the allocation ids of the currently active shard copies - * @param initializingAllocationIds the allocation ids of the currently initializing shard copies - */ - void updateAllocationIdsFromMaster(Set activeAllocationIds, Set initializingAllocationIds); + void updateShardState(ShardRouting shardRouting, + long primaryTerm, + CheckedBiConsumer, IOException> primaryReplicaSyncer, + long applyingClusterStateVersion, + Set activeAllocationIds, + Set initializingAllocationIds) throws IOException; } public interface AllocatedIndex extends Iterable, IndexComponent { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index 7191e4517ab..d661713829a 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -63,7 +63,7 @@ public class PeerRecoverySourceService extends AbstractComponent implements Inde private final ClusterService clusterService; - private final OngoingRecoveries ongoingRecoveries = new OngoingRecoveries(); + final OngoingRecoveries ongoingRecoveries = new OngoingRecoveries(); @Inject public PeerRecoverySourceService(Settings settings, TransportService transportService, IndicesService indicesService, @@ -137,7 +137,7 @@ public class PeerRecoverySourceService extends AbstractComponent implements Inde } } - private final class OngoingRecoveries { + final class OngoingRecoveries { private final Map ongoingRecoveries = new HashMap<>(); synchronized RecoverySourceHandler addNewRecovery(StartRecoveryRequest request, IndexShard shard) { @@ -192,6 +192,12 @@ public class PeerRecoverySourceService extends AbstractComponent implements Inde if (onNewRecoveryException != null) { throw onNewRecoveryException; } + for (RecoverySourceHandler existingHandler : recoveryHandlers) { + if 
(existingHandler.getRequest().targetAllocationId().equals(request.targetAllocationId())) { + throw new DelayRecoveryException("recovery with same target already registered, waiting for " + + "previous recovery attempt to be cancelled or completed"); + } + } RecoverySourceHandler handler = createRecoverySourceHandler(request, shard); recoveryHandlers.add(handler); return handler; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 4823edcc2f1..429be167e78 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -82,6 +82,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde public static final String PREPARE_TRANSLOG = "internal:index/shard/recovery/prepare_translog"; public static final String FINALIZE = "internal:index/shard/recovery/finalize"; public static final String WAIT_CLUSTERSTATE = "internal:index/shard/recovery/wait_clusterstate"; + public static final String HANDOFF_PRIMARY_CONTEXT = "internal:index/shard/recovery/handoff_primary_context"; } private final ThreadPool threadPool; @@ -116,6 +117,11 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde FinalizeRecoveryRequestHandler()); transportService.registerRequestHandler(Actions.WAIT_CLUSTERSTATE, RecoveryWaitForClusterStateRequest::new, ThreadPool.Names.GENERIC, new WaitForClusterStateRequestHandler()); + transportService.registerRequestHandler( + Actions.HANDOFF_PRIMARY_CONTEXT, + RecoveryHandoffPrimaryContextRequest::new, + ThreadPool.Names.GENERIC, + new HandoffPrimaryContextRequestHandler()); } @Override @@ -411,6 +417,18 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde } } + class HandoffPrimaryContextRequestHandler implements TransportRequestHandler { + + @Override + public void messageReceived(final RecoveryHandoffPrimaryContextRequest request, final TransportChannel channel) throws Exception { + try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { + recoveryRef.target().handoffPrimaryContext(request.primaryContext()); + } + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } + + } + class TranslogOperationsRequestHandler implements TransportRequestHandler { @Override diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java new file mode 100644 index 00000000000..6646f6cea5d --- /dev/null +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.recovery; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.PrimaryContext; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.transport.TransportRequest; + +import java.io.IOException; + +/** + * The request object to handoff the primary context to the relocation target. + */ +class RecoveryHandoffPrimaryContextRequest extends TransportRequest { + + private long recoveryId; + private ShardId shardId; + private PrimaryContext primaryContext; + + /** + * Initialize an empty request (used to serialize into when reading from a stream). + */ + RecoveryHandoffPrimaryContextRequest() { + } + + /** + * Initialize a request for the specified relocation. + * + * @param recoveryId the recovery ID of the relocation + * @param shardId the shard ID of the relocation + * @param primaryContext the primary context + */ + RecoveryHandoffPrimaryContextRequest(final long recoveryId, final ShardId shardId, final PrimaryContext primaryContext) { + this.recoveryId = recoveryId; + this.shardId = shardId; + this.primaryContext = primaryContext; + } + + long recoveryId() { + return this.recoveryId; + } + + ShardId shardId() { + return shardId; + } + + PrimaryContext primaryContext() { + return primaryContext; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + recoveryId = in.readLong(); + shardId = ShardId.readShardId(in); + primaryContext = new PrimaryContext(in); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeLong(recoveryId); + shardId.writeTo(out); + primaryContext.writeTo(out); + } + + @Override + public String toString() { + return "RecoveryHandoffPrimaryContextRequest{" + + "recoveryId=" + recoveryId + + ", shardId=" + shardId + + ", primaryContext=" + primaryContext + + '}'; + } +} diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 8abd3a05d8e..6a39700545b 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -31,6 +31,7 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.bytes.BytesArray; @@ -41,6 +42,7 @@ import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CancellableThreads; +import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.engine.Engine; import 
org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.seqno.LocalCheckpointTracker; @@ -52,14 +54,18 @@ import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteTransportException; import java.io.BufferedOutputStream; import java.io.IOException; import java.io.OutputStream; import java.util.ArrayList; +import java.util.Comparator; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.StreamSupport; @@ -123,18 +129,23 @@ public class RecoverySourceHandler { this.response = new RecoveryResponse(); } + public StartRecoveryRequest getRequest() { + return request; + } + /** * performs the recovery from the local engine to the target */ public RecoveryResponse recoverToTarget() throws IOException { try (Translog.View translogView = shard.acquireTranslogView()) { - logger.trace("captured translog id [{}] for recovery", translogView.minTranslogGeneration()); + final long startingSeqNo; boolean isSequenceNumberBasedRecoveryPossible = request.startingSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO && isTranslogReadyForSequenceNumberBasedRecovery(translogView); if (isSequenceNumberBasedRecoveryPossible) { logger.trace("performing sequence numbers based recovery. starting at [{}]", request.startingSeqNo()); + startingSeqNo = request.startingSeqNo(); } else { final Engine.IndexCommitRef phase1Snapshot; try { @@ -143,8 +154,12 @@ public class RecoverySourceHandler { IOUtils.closeWhileHandlingException(translogView); throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e); } + // we set this to unassigned to create a translog roughly according to the retention policy + // on the target + startingSeqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + try { - phase1(phase1Snapshot.getIndexCommit(), translogView); + phase1(phase1Snapshot.getIndexCommit(), translogView, startingSeqNo); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "phase1 failed", e); } finally { @@ -157,7 +172,7 @@ public class RecoverySourceHandler { } try { - prepareTargetForTranslog(translogView.totalOperations()); + prepareTargetForTranslog(translogView.estimateTotalOperations(startingSeqNo)); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e); } @@ -180,12 +195,10 @@ public class RecoverySourceHandler { throw new IndexShardRelocatedException(request.shardId()); } - logger.trace("snapshot translog for recovery; current size is [{}]", translogView.totalOperations()); + logger.trace("snapshot translog for recovery; current size is [{}]", translogView.estimateTotalOperations(startingSeqNo)); final long targetLocalCheckpoint; try { - final long startingSeqNo = - isSequenceNumberBasedRecoveryPossible ? 
request.startingSeqNo() : SequenceNumbersService.UNASSIGNED_SEQ_NO; - targetLocalCheckpoint = phase2(startingSeqNo, translogView.snapshot()); + targetLocalCheckpoint = phase2(startingSeqNo, translogView.snapshot(startingSeqNo)); } catch (Exception e) { throw new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e); } @@ -219,7 +232,7 @@ public class RecoverySourceHandler { logger.trace("all operations up to [{}] completed, checking translog content", endingSeqNo); final LocalCheckpointTracker tracker = new LocalCheckpointTracker(shard.indexSettings(), startingSeqNo, startingSeqNo - 1); - final Translog.Snapshot snapshot = translogView.snapshot(); + final Translog.Snapshot snapshot = translogView.snapshot(startingSeqNo); Translog.Operation operation; while ((operation = snapshot.next()) != null) { if (operation.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { @@ -244,7 +257,7 @@ public class RecoverySourceHandler { * segments that are missing. Only segments that have the same size and * checksum can be reused */ - public void phase1(final IndexCommit snapshot, final Translog.View translogView) { + public void phase1(final IndexCommit snapshot, final Translog.View translogView, final long startSeqNo) { cancellableThreads.checkForCancel(); // Total size of segment files that are recovered long totalSize = 0; @@ -322,10 +335,10 @@ public class RecoverySourceHandler { new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize)); cancellableThreads.execute(() -> recoveryTarget.receiveFileInfo(response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, - response.phase1ExistingFileSizes, translogView.totalOperations())); + response.phase1ExistingFileSizes, translogView.estimateTotalOperations(startSeqNo))); // How many bytes we've copied since we last called RateLimiter.pause final Function outputStreamFactories = - md -> new BufferedOutputStream(new RecoveryOutputStream(md, translogView), chunkSizeInBytes); + md -> new BufferedOutputStream(new RecoveryOutputStream(md, translogView, startSeqNo), chunkSizeInBytes); sendFiles(store, phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories); // Send the CLEAN_FILES request, which takes all of the files that // were transferred and renames them from their temporary file @@ -336,7 +349,8 @@ public class RecoverySourceHandler { // related to this recovery (out of date segments, for example) // are deleted try { - cancellableThreads.executeIO(() -> recoveryTarget.cleanFiles(translogView.totalOperations(), recoverySourceMetadata)); + cancellableThreads.executeIO(() -> + recoveryTarget.cleanFiles(translogView.estimateTotalOperations(startSeqNo), recoverySourceMetadata)); } catch (RemoteTransportException | IOException targetException) { final IOException corruptIndexException; // we realized that after the index was copied and we wanted to finalize the recovery @@ -347,11 +361,8 @@ public class RecoverySourceHandler { try { final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot); StoreFileMetaData[] metadata = - StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(size -> new - StoreFileMetaData[size]); - ArrayUtil.timSort(metadata, (o1, o2) -> { - return Long.compare(o1.length(), o2.length()); // check small files first - }); + StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(StoreFileMetaData[]::new); + ArrayUtil.timSort(metadata, 
Comparator.comparingLong(StoreFileMetaData::length)); // check small files first for (StoreFileMetaData md : metadata) { cancellableThreads.checkForCancel(); logger.debug("checking integrity for file {} after remove corruption exception", md); @@ -448,7 +459,21 @@ public class RecoverySourceHandler { StopWatch stopWatch = new StopWatch().start(); logger.trace("finalizing recovery"); cancellableThreads.execute(() -> { - shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint); + /* + * Before marking the shard as in-sync we acquire an operation permit. We do this so that there is a barrier between marking a + * shard as in-sync and relocating a shard. If we acquire the permit then no relocation handoff can complete before we are done + * marking the shard as in-sync. If the relocation handoff holds all the permits then after the handoff completes and we acquire + * the permit then the state of the shard will be relocated and this recovery will fail. + */ + final PlainActionFuture onAcquired = new PlainActionFuture<>(); + shard.acquirePrimaryOperationPermit(onAcquired, ThreadPool.Names.SAME); + try (Releasable ignored = onAcquired.actionGet()) { + if (shard.state() == IndexShardState.RELOCATED) { + throw new IndexShardRelocatedException(shard.shardId()); + } + shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint); + } + recoveryTarget.finalizeRecovery(shard.getGlobalCheckpoint()); }); @@ -463,7 +488,7 @@ public class RecoverySourceHandler { cancellableThreads.execute(() -> recoveryTarget.ensureClusterStateVersion(currentClusterStateVersion)); logger.trace("performing relocation hand-off"); - cancellableThreads.execute(() -> shard.relocated("to " + request.targetNode())); + cancellableThreads.execute(() -> shard.relocated("to " + request.targetNode(), recoveryTarget::handoffPrimaryContext)); } /* * if the recovery process fails after setting the shard state to RELOCATED, both relocation source and @@ -577,11 +602,13 @@ public class RecoverySourceHandler { final class RecoveryOutputStream extends OutputStream { private final StoreFileMetaData md; private final Translog.View translogView; + private final long startSeqNp; private long position = 0; - RecoveryOutputStream(StoreFileMetaData md, Translog.View translogView) { + RecoveryOutputStream(StoreFileMetaData md, Translog.View translogView, long startSeqNp) { this.md = md; this.translogView = translogView; + this.startSeqNp = startSeqNp; } @Override @@ -599,7 +626,7 @@ public class RecoverySourceHandler { private void sendNextChunk(long position, BytesArray content, boolean lastChunk) throws IOException { // Actually send the file chunk to the target node, waiting for it to complete cancellableThreads.executeIO(() -> - recoveryTarget.writeFileChunk(md, position, content, lastChunk, translogView.totalOperations()) + recoveryTarget.writeFileChunk(md, position, content, lastChunk, translogView.estimateTotalOperations(startSeqNp)) ); if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us throw new IndexShardClosedException(request.shardId()); @@ -610,7 +637,7 @@ public class RecoverySourceHandler { void sendFiles(Store store, StoreFileMetaData[] files, Function outputStreamFactory) throws Exception { store.incRef(); try { - ArrayUtil.timSort(files, (a, b) -> Long.compare(a.length(), b.length())); // send smallest first + ArrayUtil.timSort(files, Comparator.comparingLong(StoreFileMetaData::length)); // send smallest first for (int i = 0; i < files.length; i++) 
{ final StoreFileMetaData md = files[i]; try (IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE)) { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index 77d8b4d7077..459b811552b 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -467,6 +467,11 @@ public class RecoveryState implements ToXContent, Streamable { assert total == UNKNOWN || total >= recovered : "total, if known, should be > recovered. total [" + total + "], recovered [" + recovered + "]"; } + public synchronized void incrementRecoveredOperations(int ops) { + recovered += ops; + assert total == UNKNOWN || total >= recovered : "total, if known, should be > recovered. total [" + total + "], recovered [" + recovered + "]"; + } + public synchronized void decrementRecoveredOperations(int ops) { recovered -= ops; assert recovered >= 0 : "recovered operations must be non-negative. Because [" + recovered + "] after decrementing [" + ops + "]"; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 6a465f11115..2837a85d1ae 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -44,6 +44,7 @@ import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardNotRecoveringException; import org.elasticsearch.index.shard.IndexShardState; +import org.elasticsearch.index.shard.PrimaryContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; @@ -61,7 +62,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; import java.util.function.LongConsumer; /** @@ -379,30 +379,30 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget } @Override - public long indexTranslogOperations(List operations, int totalTranslogOps) throws MapperException, IOException { + public void handoffPrimaryContext(final PrimaryContext primaryContext) { + indexShard.updateAllocationIdsFromPrimaryContext(primaryContext); + } + + @Override + public long indexTranslogOperations(List operations, int totalTranslogOps) throws IOException { final RecoveryState.Translog translog = state().getTranslog(); translog.totalOperations(totalTranslogOps); assert indexShard().recoveryState() == state(); if (indexShard().state() != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, indexShard().state()); } - // first convert all translog operations to engine operations to check for mapping updates - List engineOps = operations.stream().map( - op -> { - Engine.Operation engineOp = indexShard().convertToEngineOp(op, Engine.Operation.Origin.PEER_RECOVERY); - if (engineOp instanceof Engine.Index && ((Engine.Index) engineOp).parsedDoc().dynamicMappingsUpdate() != null) { - throw new MapperException("mapping updates are not allowed (type: [" + engineOp.type() + "], id: [" + - ((Engine.Index) engineOp).id() + "])"); 
- } - return engineOp; - } - ).collect(Collectors.toList()); - // actually apply engine operations - for (Engine.Operation engineOp : engineOps) { - indexShard().applyOperation(engineOp); - translog.incrementRecoveredOperations(); + for (Translog.Operation operation : operations) { + Engine.Result result = indexShard().applyTranslogOperation(operation, Engine.Operation.Origin.PEER_RECOVERY, update -> { + throw new MapperException("mapping updates are not allowed [" + operation + "]"); + }); + assert result.hasFailure() == false : "unexpected failure while replicating translog entry: " + result.getFailure(); + ExceptionsHelper.reThrowIfNotNull(result.getFailure()); } + // update stats only after all operations completed (to ensure that mapping updates don't mess with stats) + translog.incrementRecoveredOperations(operations.size()); indexShard().sync(); + // roll over / flush / trim if needed + indexShard().afterWriteOperation(); return indexShard().getLocalCheckpoint(); } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java index 42cf1bc1ce1..34b0df2293f 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.recovery; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.shard.PrimaryContext; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.index.translog.Translog; @@ -49,6 +50,13 @@ public interface RecoveryTargetHandler { */ void ensureClusterStateVersion(long clusterStateVersion); + /** + * Handoff the primary context between the relocation source and the relocation target. 
+ * + * @param primaryContext the primary context from the relocation source + */ + void handoffPrimaryContext(PrimaryContext primaryContext); + /** * Index a set of translog operations on the target * @param operations operations to index diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index a4f24b710b2..14c8f762e6d 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -23,15 +23,14 @@ import org.apache.lucene.store.RateLimiter; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.shard.PrimaryContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.transport.EmptyTransportResponseHandler; -import org.elasticsearch.transport.FutureTransportResponseHandler; import org.elasticsearch.transport.TransportFuture; import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -97,7 +96,17 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.WAIT_CLUSTERSTATE, new RecoveryWaitForClusterStateRequest(recoveryId, shardId, clusterStateVersion), TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(), - EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + } + + @Override + public void handoffPrimaryContext(final PrimaryContext primaryContext) { + transportService.submitRequest( + targetNode, + PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT, + new RecoveryHandoffPrimaryContextRequest(recoveryId, shardId, primaryContext), + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), + EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } @Override @@ -159,13 +168,13 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { } transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.FILE_CHUNK, - new RecoveryFileChunkRequest(recoveryId, shardId, fileMetaData, position, content, lastChunk, - totalTranslogOps, - /* we send totalOperations with every request since we collect stats on the target and that way we can - * see how many translog ops we accumulate while copying files across the network. A future optimization - * would be in to restart file copy again (new deltas) if we have too many translog ops are piling up. - */ - throttleTimeInNanos), fileChunkRequestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + new RecoveryFileChunkRequest(recoveryId, shardId, fileMetaData, position, content, lastChunk, + totalTranslogOps, + /* we send estimateTotalOperations with every request since we collect stats on the target and that way we can + * see how many translog ops we accumulate while copying files across the network. 
A future optimization + * would be in to restart file copy again (new deltas) if we have too many translog ops are piling up. + */ + throttleTimeInNanos), fileChunkRequestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } } diff --git a/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java b/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java index 9ee08126420..93c5a18222c 100644 --- a/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java +++ b/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java @@ -63,7 +63,7 @@ public class InternalSettingsPreparer { * @return the {@link Settings} and {@link Environment} as a {@link Tuple} */ public static Environment prepareEnvironment(Settings input, Terminal terminal) { - return prepareEnvironment(input, terminal, Collections.emptyMap()); + return prepareEnvironment(input, terminal, Collections.emptyMap(), null); } /** @@ -71,16 +71,18 @@ public class InternalSettingsPreparer { * and then replacing all property placeholders. If a {@link Terminal} is provided and configuration settings are loaded, * settings with a value of ${prompt.text} or ${prompt.secret} will result in a prompt for * the setting to the user. - * @param input The custom settings to use. These are not overwritten by settings in the configuration file. - * @param terminal the Terminal to use for input/output - * @param properties Map of properties key/value pairs (usually from the command-line) + * + * @param input the custom settings to use; these are not overwritten by settings in the configuration file + * @param terminal the Terminal to use for input/output + * @param properties map of properties key/value pairs (usually from the command-line) + * @param configPath path to config directory; (use null to indicate the default) * @return the {@link Settings} and {@link Environment} as a {@link Tuple} */ - public static Environment prepareEnvironment(Settings input, Terminal terminal, Map properties) { + public static Environment prepareEnvironment(Settings input, Terminal terminal, Map properties, Path configPath) { // just create enough settings to build the environment, to get the config dir Settings.Builder output = Settings.builder(); initializeSettings(output, input, properties); - Environment environment = new Environment(output.build()); + Environment environment = new Environment(output.build(), configPath); if (Files.exists(environment.configFile().resolve("elasticsearch.yaml"))) { throw new SettingsException("elasticsearch.yaml was deprecated in 5.5.0 and must be renamed to elasticsearch.yml"); @@ -104,11 +106,11 @@ public class InternalSettingsPreparer { initializeSettings(output, input, properties); finalizeSettings(output, terminal); - environment = new Environment(output.build()); + environment = new Environment(output.build(), configPath); // we put back the path.logs so we can use it in the logging configuration file output.put(Environment.PATH_LOGS_SETTING.getKey(), environment.logsFile().toAbsolutePath().normalize().toString()); - return new Environment(output.build()); + return new Environment(output.build(), configPath); } /** diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 13c829844e1..93f37cddf08 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -47,6 +47,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import 
org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; +import org.elasticsearch.cluster.metadata.TemplateUpgradeService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.service.ClusterService; @@ -268,9 +269,6 @@ public class Node implements Closeable { Logger logger = Loggers.getLogger(Node.class, tmpSettings); final String nodeId = nodeEnvironment.nodeId(); tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId); - if (DiscoveryNode.nodeRequiresLocalStorage(tmpSettings)) { - checkForIndexDataInDefaultPathData(tmpSettings, nodeEnvironment, logger); - } // this must be captured after the node name is possibly added to the settings final String nodeName = NODE_NAME_SETTING.get(tmpSettings); if (hadPredefinedNodeName == false) { @@ -301,16 +299,15 @@ public class Node implements Closeable { environment.configFile(), Arrays.toString(environment.dataFiles()), environment.logsFile(), environment.pluginsFile()); } - this.pluginsService = new PluginsService(tmpSettings, environment.modulesFile(), environment.pluginsFile(), classpathPlugins); + this.pluginsService = new PluginsService(tmpSettings, environment.configFile(), environment.modulesFile(), environment.pluginsFile(), classpathPlugins); this.settings = pluginsService.updatedSettings(); localNodeFactory = new LocalNodeFactory(settings, nodeEnvironment.nodeId()); // create the environment based on the finalized (processed) view of the settings // this is just to makes sure that people get the same settings, no matter where they ask them from - this.environment = new Environment(this.settings); + this.environment = new Environment(this.settings, environment.configFile()); Environment.assertEquivalent(environment, this.environment); - final List> executorBuilders = pluginsService.getExecutorBuilders(settings); final ThreadPool threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); @@ -386,8 +383,14 @@ public class Node implements Closeable { .flatMap(p -> p.getNamedXContent().stream()), ClusterModule.getNamedXWriteables().stream()) .flatMap(Function.identity()).collect(toList())); - final TribeService tribeService = new TribeService(settings, clusterService, nodeId, namedWriteableRegistry, - s -> newTribeClientNode(s, classpathPlugins)); + final TribeService tribeService = + new TribeService( + settings, + environment.configFile(), + clusterService, + nodeId, + namedWriteableRegistry, + (s, p) -> newTribeClientNode(s, classpathPlugins, p)); resourcesToClose.add(tribeService); modules.add(new RepositoriesModule(this.environment, pluginsService.filterPlugins(RepositoryPlugin.class), xContentRegistry)); final MetaStateService metaStateService = new MetaStateService(settings, nodeEnvironment, xContentRegistry); @@ -415,6 +418,7 @@ public class Node implements Closeable { Collection> indexMetaDataUpgraders = pluginsService.filterPlugins(Plugin.class).stream() .map(Plugin::getIndexMetaDataUpgrader).collect(Collectors.toList()); final MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(customMetaDataUpgraders, indexTemplateMetaDataUpgraders); + new TemplateUpgradeService(settings, client, clusterService, threadPool, indexTemplateMetaDataUpgraders); final Transport transport = networkModule.getTransportSupplier().get(); final TransportService transportService = newTransportService(settings, 
transport, threadPool, networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings()); @@ -518,73 +522,6 @@ public class Node implements Closeable { } } - /** - * Checks for path.data and default.path.data being configured, and there being index data in any of the paths in default.path.data. - * - * @param settings the settings to check for path.data and default.path.data - * @param nodeEnv the current node environment - * @param logger a logger where messages regarding the detection will be logged - * @throws IOException if an I/O exception occurs reading the directory structure - */ - static void checkForIndexDataInDefaultPathData( - final Settings settings, final NodeEnvironment nodeEnv, final Logger logger) throws IOException { - if (!Environment.PATH_DATA_SETTING.exists(settings) || !Environment.DEFAULT_PATH_DATA_SETTING.exists(settings)) { - return; - } - - boolean clean = true; - for (final String defaultPathData : Environment.DEFAULT_PATH_DATA_SETTING.get(settings)) { - final Path defaultNodeDirectory = NodeEnvironment.resolveNodePath(getPath(defaultPathData), nodeEnv.getNodeLockId()); - if (Files.exists(defaultNodeDirectory) == false) { - continue; - } - - if (isDefaultPathDataInPathData(nodeEnv, defaultNodeDirectory)) { - continue; - } - - final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(defaultNodeDirectory); - final Set availableIndexFolders = nodeEnv.availableIndexFoldersForPath(nodePath); - if (availableIndexFolders.isEmpty()) { - continue; - } - - clean = false; - logger.error("detected index data in default.path.data [{}] where there should not be any", nodePath.indicesPath); - for (final String availableIndexFolder : availableIndexFolders) { - logger.info( - "index folder [{}] in default.path.data [{}] must be moved to any of {}", - availableIndexFolder, - nodePath.indicesPath, - Arrays.stream(nodeEnv.nodePaths()).map(np -> np.indicesPath).collect(Collectors.toList())); - } - } - - if (clean) { - return; - } - - final String message = String.format( - Locale.ROOT, - "detected index data in default.path.data %s where there should not be any; check the logs for details", - Environment.DEFAULT_PATH_DATA_SETTING.get(settings)); - throw new IllegalStateException(message); - } - - private static boolean isDefaultPathDataInPathData(final NodeEnvironment nodeEnv, final Path defaultNodeDirectory) throws IOException { - for (final NodeEnvironment.NodePath dataPath : nodeEnv.nodePaths()) { - if (Files.isSameFile(dataPath.path, defaultNodeDirectory)) { - return true; - } - } - return false; - } - - @SuppressForbidden(reason = "read path that is not configured in environment") - private static Path getPath(final String path) { - return PathUtils.get(path); - } - // visible for testing static void warnIfPreRelease(final Version version, final boolean isSnapshot, final Logger logger) { if (!version.isRelease() || isSnapshot) { @@ -978,8 +915,8 @@ public class Node implements Closeable { } /** Constructs an internal node used as a client into a cluster fronted by this tribe node. */ - protected Node newTribeClientNode(Settings settings, Collection> classpathPlugins) { - return new Node(new Environment(settings), classpathPlugins); + protected Node newTribeClientNode(Settings settings, Collection> classpathPlugins, Path configPath) { + return new Node(new Environment(settings, configPath), classpathPlugins); } /** Constructs a ClusterInfoService which may be mocked for tests. 
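The InternalSettingsPreparer and Node hunks above thread an explicit config directory (configPath) through environment and plugin construction instead of always deriving it from the settings. The following is a hedged usage sketch of what a caller, for example a test or a tribe client node, might now do; the paths and node name are made up.

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;

import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.InternalSettingsPreparer;

class EnvironmentWithConfigPathExample {
    static Environment prepare() {
        final Settings settings = Settings.builder()
                .put("path.home", "/tmp/es-home")               // hypothetical locations
                .put("node.name", "example-node")
                .build();
        final Path configPath = Paths.get("/tmp/es-home/custom-config");
        // passing null for configPath falls back to resolving the config dir from the settings,
        // which is what the two-argument overload above does
        return InternalSettingsPreparer.prepareEnvironment(settings, Terminal.DEFAULT, Collections.emptyMap(), configPath);
    }
}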
*/ diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java index b2cea4c0ad2..2e0ec0f242e 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -42,6 +42,7 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.threadpool.ExecutorBuilder; import java.io.IOException; +import java.lang.reflect.Constructor; import java.net.URL; import java.net.URLClassLoader; import java.nio.file.DirectoryStream; @@ -61,12 +62,13 @@ import java.util.Objects; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory; public class PluginsService extends AbstractComponent { + private final Path configPath; + /** * We keep around a list of plugins and modules */ @@ -90,14 +92,16 @@ public class PluginsService extends AbstractComponent { * @param pluginsDirectory The directory plugins exist in, or null if plugins should not be loaded from the filesystem * @param classpathPlugins Plugins that exist in the classpath which should be loaded */ - public PluginsService(Settings settings, Path modulesDirectory, Path pluginsDirectory, Collection> classpathPlugins) { + public PluginsService(Settings settings, Path configPath, Path modulesDirectory, Path pluginsDirectory, Collection> classpathPlugins) { super(settings); + this.configPath = configPath; + List> pluginsLoaded = new ArrayList<>(); List pluginsList = new ArrayList<>(); // first we load plugins that are on the classpath. this is for tests and transport clients for (Class pluginClass : classpathPlugins) { - Plugin plugin = loadPlugin(pluginClass, settings); + Plugin plugin = loadPlugin(pluginClass, settings, configPath); PluginInfo pluginInfo = new PluginInfo(pluginClass.getName(), "classpath plugin", "NA", pluginClass.getName(), false); if (logger.isTraceEnabled()) { logger.trace("plugin loaded from classpath [{}]", pluginInfo); @@ -381,7 +385,7 @@ public class PluginsService extends AbstractComponent { reloadLuceneSPI(loader); final Class pluginClass = loadPluginClass(bundle.plugin.getClassname(), loader); - final Plugin plugin = loadPlugin(pluginClass, settings); + final Plugin plugin = loadPlugin(pluginClass, settings, configPath); plugins.add(new Tuple<>(bundle.plugin, plugin)); } @@ -414,22 +418,45 @@ public class PluginsService extends AbstractComponent { } } - private Plugin loadPlugin(Class pluginClass, Settings settings) { - try { - try { - return pluginClass.getConstructor(Settings.class).newInstance(settings); - } catch (NoSuchMethodException e) { - try { - return pluginClass.getConstructor().newInstance(); - } catch (NoSuchMethodException e1) { - throw new ElasticsearchException("No constructor for [" + pluginClass + "]. 
A plugin class must " + - "have either an empty default constructor or a single argument constructor accepting a " + - "Settings instance"); - } - } - } catch (Exception e) { - throw new ElasticsearchException("Failed to load plugin class [" + pluginClass.getName() + "]", e); + private Plugin loadPlugin(Class pluginClass, Settings settings, Path configPath) { + final Constructor[] constructors = pluginClass.getConstructors(); + if (constructors.length == 0) { + throw new IllegalStateException("no public constructor for [" + pluginClass.getName() + "]"); } + + if (constructors.length > 1) { + throw new IllegalStateException("no unique public constructor for [" + pluginClass.getName() + "]"); + } + + final Constructor constructor = constructors[0]; + if (constructor.getParameterCount() > 2) { + throw new IllegalStateException(signatureMessage(pluginClass)); + } + + final Class[] parameterTypes = constructor.getParameterTypes(); + try { + if (constructor.getParameterCount() == 2 && parameterTypes[0] == Settings.class && parameterTypes[1] == Path.class) { + return (Plugin)constructor.newInstance(settings, configPath); + } else if (constructor.getParameterCount() == 1 && parameterTypes[0] == Settings.class) { + return (Plugin)constructor.newInstance(settings); + } else if (constructor.getParameterCount() == 0) { + return (Plugin)constructor.newInstance(); + } else { + throw new IllegalStateException(signatureMessage(pluginClass)); + } + } catch (final ReflectiveOperationException e) { + throw new IllegalStateException("failed to load plugin class [" + pluginClass.getName() + "]", e); + } + } + + private String signatureMessage(final Class clazz) { + return String.format( + Locale.ROOT, + "no public constructor of correct signature for [%s]; must be [%s], [%s], or [%s]", + clazz.getName(), + "(org.elasticsearch.common.settings.Settings,java.nio.file.Path)", + "(org.elasticsearch.common.settings.Settings)", + "()"); } public List filterPlugins(Class type) { diff --git a/core/src/main/java/org/elasticsearch/plugins/spi/NamedXContentProvider.java b/core/src/main/java/org/elasticsearch/plugins/spi/NamedXContentProvider.java new file mode 100644 index 00000000000..ef511fcfeae --- /dev/null +++ b/core/src/main/java/org/elasticsearch/plugins/spi/NamedXContentProvider.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins.spi; + +import org.elasticsearch.common.xcontent.NamedXContentRegistry; + +import java.util.List; + +/** + * Provides named XContent parsers. + */ +public interface NamedXContentProvider { + + /** + * @return a list of {@link NamedXContentRegistry.Entry} that this plugin provides. 
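For illustration, with the reflective loadPlugin above a plugin opts into the new config-path support simply by declaring a single public (Settings, Path) constructor; plugins with a (Settings) or no-arg constructor keep working unchanged. A minimal sketch, where the plugin class and its field are hypothetical:

    import java.nio.file.Path;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.plugins.Plugin;

    // Hypothetical example: PluginsService selects this unique public constructor via
    // reflection and passes the resolved config directory as the second argument.
    public class ExampleConfigAwarePlugin extends Plugin {
        private final Path pluginConfigDir;

        public ExampleConfigAwarePlugin(final Settings settings, final Path configPath) {
            // e.g. read this plugin's own configuration from a sub-directory of the config path
            this.pluginConfigDir = configPath.resolve("example-plugin");
        }
    }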
+ */ + List getNamedXContentParsers(); +} diff --git a/core/src/main/java/org/elasticsearch/plugins/spi/package-info.java b/core/src/main/java/org/elasticsearch/plugins/spi/package-info.java new file mode 100644 index 00000000000..7740e1424fb --- /dev/null +++ b/core/src/main/java/org/elasticsearch/plugins/spi/package-info.java @@ -0,0 +1,25 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * This package contains interfaces for services provided by + * Elasticsearch plugins to external applications like the + * Java High Level Rest Client. + */ +package org.elasticsearch.plugins.spi; diff --git a/core/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/core/src/main/java/org/elasticsearch/repositories/RepositoryData.java index ac52ac30d69..102bc5a5f05 100644 --- a/core/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/core/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -20,6 +20,7 @@ package org.elasticsearch.repositories; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.xcontent.ToXContent; @@ -189,8 +190,11 @@ public final class RepositoryData { */ public RepositoryData removeSnapshot(final SnapshotId snapshotId) { Map newSnapshotIds = snapshotIds.values().stream() - .filter(id -> snapshotId.equals(id) == false) + .filter(id -> !snapshotId.equals(id)) .collect(Collectors.toMap(SnapshotId::getUUID, Function.identity())); + if (newSnapshotIds.size() == snapshotIds.size()) { + throw new ResourceNotFoundException("Attempting to remove non-existent snapshot [{}] from repository data", snapshotId); + } Map newSnapshotStates = new HashMap<>(snapshotStates); newSnapshotStates.remove(snapshotId.getUUID()); Map> indexSnapshots = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 29b12666c6c..dccf12c8ed3 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -38,6 +38,7 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -412,8 +413,8 @@ public abstract class BlobStoreRepository extends 
AbstractLifecycleComponent imp "its index folder.", metadata.name(), indexId), ioe); } } - } catch (IOException ex) { - throw new RepositoryException(metadata.name(), "failed to update snapshot in repository", ex); + } catch (IOException | ResourceNotFoundException ex) { + throw new RepositoryException(metadata.name(), "failed to delete snapshot [" + snapshotId + "]", ex); } } @@ -683,7 +684,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp snapshotsBytes = bStream.bytes(); } // write the index file - writeAtomic(INDEX_FILE_PREFIX + Long.toString(newGen), snapshotsBytes); + final String indexBlob = INDEX_FILE_PREFIX + Long.toString(newGen); + logger.debug("Repository [{}] writing new index generational blob [{}]", metadata.name(), indexBlob); + writeAtomic(indexBlob, snapshotsBytes); // delete the N-2 index file if it exists, keep the previous one around as a backup if (isReadOnly() == false && newGen - 2 >= 0) { final String oldSnapshotIndexFile = INDEX_FILE_PREFIX + Long.toString(newGen - 2); @@ -701,6 +704,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp if (snapshotsBlobContainer.blobExists(INDEX_LATEST_BLOB)) { snapshotsBlobContainer.deleteBlob(INDEX_LATEST_BLOB); } + logger.debug("Repository [{}] updating index.latest with generation [{}]", metadata.name(), newGen); writeAtomic(INDEX_LATEST_BLOB, genBytes); } diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index 104ffe420ab..07f39b54f61 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.http.HttpInfo; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.cache.request.RequestCacheStats; @@ -124,7 +125,10 @@ public class RestNodesAction extends AbstractCatAction { table.addCell("version", "default:false;alias:v;desc:es version"); table.addCell("build", "default:false;alias:b;desc:es build hash"); table.addCell("jdk", "default:false;alias:j;desc:jdk version"); - table.addCell("disk.avail", "default:false;alias:d,disk,diskAvail;text-align:right;desc:available disk space"); + table.addCell("disk.total", "default:false;alias:dt,diskTotal;text-align:right;desc:total disk space"); + table.addCell("disk.used", "default:false;alias:du,diskUsed;text-align:right;desc:used disk space"); + table.addCell("disk.avail", "default:false;alias:d,da,disk,diskAvail;text-align:right;desc:available disk space"); + table.addCell("disk.used_percent", "default:false;alias:dup,diskUsedPercent;text-align:right;desc:used disk space percentage"); table.addCell("heap.current", "default:false;alias:hc,heapCurrent;text-align:right;desc:used heap"); table.addCell("heap.percent", "alias:hp,heapPercent;text-align:right;desc:used heap ratio"); table.addCell("heap.max", "default:false;alias:hm,heapMax;text-align:right;desc:max configured heap"); @@ -267,7 +271,15 @@ public class RestNodesAction extends AbstractCatAction { table.addCell(node.getVersion().toString()); table.addCell(info == null ? 
null : info.getBuild().shortHash()); table.addCell(jvmInfo == null ? null : jvmInfo.version()); + + long diskTotal = fsInfo.getTotal().getTotal().getBytes(); + long diskUsed = diskTotal - fsInfo.getTotal().getAvailable().getBytes(); + double diskUsedRatio = diskTotal == 0 ? 1.0 : (double) diskUsed / diskTotal; + table.addCell(fsInfo == null ? null : fsInfo.getTotal().getTotal()); + table.addCell(fsInfo == null ? null : new ByteSizeValue(diskUsed)); table.addCell(fsInfo == null ? null : fsInfo.getTotal().getAvailable()); + table.addCell(fsInfo == null ? null : String.format(Locale.ROOT, "%.2f", 100.0 * diskUsedRatio)); + table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsed()); table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsedPercent()); table.addCell(jvmInfo == null ? null : jvmInfo.getMem().getHeapMax()); diff --git a/core/src/main/java/org/elasticsearch/script/ScriptContext.java b/core/src/main/java/org/elasticsearch/script/ScriptContext.java index 3f931f659ed..081a26d1e51 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptContext.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptContext.java @@ -46,6 +46,13 @@ import java.lang.reflect.Method; * The StatefulFactoryType is an optional class which allows a stateful factory from the * stateless factory type required by the {@link ScriptService}. If defined, the StatefulFactoryType * must have a method named {@code newInstance} which returns an instance of InstanceType. + *

+ * Both the FactoryType and StatefulFactoryType may have abstract methods to indicate + * whether a variable is used in a script. These methods should return a {@code boolean} and their names + * should start with {@code needs}, followed by the variable name, with the first letter uppercased. + * For example, to check if a variable {@code doc} is used, a method {@code boolean needsDoc()} should be added. + * If the variable name starts with an underscore, for example, {@code _score}, the needs method would + * be {@code boolean needs_score()}. */ public final class ScriptContext { diff --git a/core/src/main/java/org/elasticsearch/script/SearchScript.java b/core/src/main/java/org/elasticsearch/script/SearchScript.java index 4c50147b22c..d0c932a3490 100644 --- a/core/src/main/java/org/elasticsearch/script/SearchScript.java +++ b/core/src/main/java/org/elasticsearch/script/SearchScript.java @@ -144,12 +144,11 @@ public abstract class SearchScript implements ScorerAware, ExecutableScript { /** A factory to construct {@link SearchScript} instances. */ public interface LeafFactory { SearchScript newInstance(LeafReaderContext ctx) throws IOException; + /** - * Indicates if document scores may be needed by this {@link SearchScript}. - * - * @return {@code true} if scores are needed. + * Return {@code true} if the script needs {@code _score} calculated, or {@code false} otherwise. */ - boolean needsScores(); + boolean needs_score(); } /** A factory to construct stateful {@link SearchScript} factories for a specific index. */ diff --git a/core/src/main/java/org/elasticsearch/search/SearchHit.java b/core/src/main/java/org/elasticsearch/search/SearchHit.java index 81cba7d8db7..4300cbcc111 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/core/src/main/java/org/elasticsearch/search/SearchHit.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -75,7 +76,7 @@ import static org.elasticsearch.search.fetch.subphase.highlight.HighlightField.r * * @see SearchHits */ -public final class SearchHit implements Streamable, ToXContentObject, Iterable { +public final class SearchHit implements Streamable, ToXContentObject, Iterable { private transient int docId; @@ -91,7 +92,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable fields = emptyMap(); + private Map fields = emptyMap(); private Map highlightFields = null; @@ -118,11 +119,11 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable fields) { + public SearchHit(int docId, String id, Text type, Map fields) { this(docId, id, type, null, fields); } - public SearchHit(int nestedTopDocId, String id, Text type, NestedIdentity nestedIdentity, Map fields) { + public SearchHit(int nestedTopDocId, String id, Text type, NestedIdentity nestedIdentity, Map fields) { this.docId = nestedTopDocId; if (id != null) { this.id = new Text(id); @@ -252,14 +253,14 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable iterator() { + public Iterator iterator() { return fields.values().iterator(); } /** * The hit field matching the given field name.
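The renamed needs_score() above follows the convention documented in the ScriptContext javadoc: one boolean "needs" method per script variable, with a leading underscore preserved as-is. A sketch of a hypothetical factory interface written against that convention:

    // Sketch only: ExampleScript and ExampleScriptFactory are hypothetical types,
    // but the method naming mirrors the convention described above.
    interface ExampleScript {
        double execute();
    }

    interface ExampleScriptFactory {
        ExampleScript newInstance();

        boolean needsDoc();      // true if the compiled script uses the doc variable
        boolean needs_score();   // true if it uses _score (underscore kept, no uppercasing)
    }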
*/ - public SearchHitField field(String fieldName) { + public DocumentField field(String fieldName) { return getFields().get(fieldName); } @@ -267,16 +268,16 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable getFields() { + public Map getFields() { return fields == null ? emptyMap() : fields; } // returns the fields without handling null cases - public Map fieldsOrNull() { + public Map fieldsOrNull() { return fields; } - public void fields(Map fields) { + public void fields(Map fields) { this.fields = fields; } @@ -382,10 +383,10 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable metaFields = new ArrayList<>(); - List otherFields = new ArrayList<>(); + List metaFields = new ArrayList<>(); + List otherFields = new ArrayList<>(); if (fields != null && !fields.isEmpty()) { - for (SearchHitField field : fields.values()) { + for (DocumentField field : fields.values()) { if (field.getValues().isEmpty()) { continue; } @@ -424,7 +425,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable map.put(Fields.HIGHLIGHT, value), (p, c) -> parseHighlightFields(p), new ParseField(Fields.HIGHLIGHT)); parser.declareObject((map, value) -> { - Map fieldMap = get(Fields.FIELDS, map, new HashMap()); + Map fieldMap = get(Fields.FIELDS, map, new HashMap()); fieldMap.putAll(value); map.put(Fields.FIELDS, fieldMap); }, (p, c) -> parseFields(p), new ParseField(Fields.FIELDS)); @@ -528,7 +525,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable fields = get(Fields.FIELDS, values, null); + Map fields = get(Fields.FIELDS, values, null); SearchHit searchHit = new SearchHit(-1, id, type, nestedIdentity, fields); searchHit.index = get(Fields._INDEX, values, null); @@ -585,28 +582,23 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable { @SuppressWarnings("unchecked") - Map fieldMap = (Map) map.computeIfAbsent(Fields.FIELDS, - v -> new HashMap()); + Map fieldMap = (Map) map.computeIfAbsent(Fields.FIELDS, + v -> new HashMap()); fieldMap.put(field.getName(), field); }, (p, c) -> { List values = new ArrayList<>(); values.add(parseStoredFieldsValue(p)); - return new SearchHitField(metadatafield, values); + return new DocumentField(metadatafield, values); }, new ParseField(metadatafield), ValueType.VALUE); } } } - private static Map parseFields(XContentParser parser) throws IOException { - Map fields = new HashMap<>(); - while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) { - String fieldName = parser.currentName(); - ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation); - List values = new ArrayList<>(); - while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { - values.add(parseStoredFieldsValue(parser)); - } - fields.put(fieldName, new SearchHitField(fieldName, values)); + private static Map parseFields(XContentParser parser) throws IOException { + Map fields = new HashMap<>(); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + DocumentField field = DocumentField.fromXContent(parser); + fields.put(field.getName(), field); } return fields; } @@ -704,12 +696,12 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable fields = new HashMap<>(); + Map fields = new HashMap<>(); for (int i = 0; i < size; i++) { - SearchHitField hitField = SearchHitField.readSearchHitField(in); + DocumentField hitField = DocumentField.readDocumentField(in); fields.put(hitField.getName(), 
hitField); } this.fields = unmodifiableMap(fields); @@ -770,7 +762,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable { - - private String name; - private List values; - - private SearchHitField() { - } - - public SearchHitField(String name, List values) { - this.name = name; - this.values = values; - } - - /** - * The name of the field. - */ - public String getName() { - return name; - } - - /** - * The first value of the hit. - */ - public V getValue() { - if (values == null || values.isEmpty()) { - return null; - } - return (V)values.get(0); - } - - /** - * The field values. - */ - public List getValues() { - return values; - } - - /** - * @return The field is a metadata field - */ - public boolean isMetadataField() { - return MapperService.isMetadataField(name); - } - - @Override - public Iterator iterator() { - return values.iterator(); - } - - public static SearchHitField readSearchHitField(StreamInput in) throws IOException { - SearchHitField result = new SearchHitField(); - result.readFrom(in); - return result; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - name = in.readString(); - int size = in.readVInt(); - values = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - values.add(in.readGenericValue()); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(name); - out.writeVInt(values.size()); - for (Object value : values) { - out.writeGenericValue(value); - } - } - - @Override - public boolean equals(Object obj) { - if (obj == null || getClass() != obj.getClass()) { - return false; - } - SearchHitField other = (SearchHitField) obj; - return Objects.equals(name, other.name) - && Objects.equals(values, other.values); - } - - @Override - public int hashCode() { - return Objects.hash(name, values); - } -} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java index ef82c48b4e7..bbd9e3a20f7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -115,6 +116,11 @@ public abstract class AbstractAggregationBuilder getMetaData() { + return Collections.unmodifiableMap(metaData); + } + @Override public final String getWriteableName() { // We always use the type of the aggregation as the writeable name diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java index 16f8ef2444f..694a78f9d1c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java @@ -64,6 +64,9 @@ public abstract class AggregationBuilder @Override public abstract AggregationBuilder setMetaData(Map metaData); + /** Return any associated metadata with this {@link AggregationBuilder}. */ + public abstract Map getMetaData(); + /** Add a sub aggregation to this builder. 
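Relating to the removal of SearchHitField above: callers migrate to org.elasticsearch.common.document.DocumentField, which exposes the same getName() and getValues() accessors, and SearchHit now iterates DocumentField directly. A hedged consumer-side sketch, assuming hit is a SearchHit taken from a search response:

    for (DocumentField field : hit) {
        // getName() and getValues() behave as the deleted SearchHitField accessors did
        System.out.println(field.getName() + " -> " + field.getValues());
    }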
*/ public abstract AggregationBuilder subAggregation(AggregationBuilder aggregation); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/Aggregations.java b/core/src/main/java/org/elasticsearch/search/aggregations/Aggregations.java index 05521e48831..ba51fc419fb 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/Aggregations.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/Aggregations.java @@ -18,10 +18,11 @@ */ package org.elasticsearch.search.aggregations; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentParserUtils; import java.io.IOException; import java.util.ArrayList; @@ -29,10 +30,12 @@ import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.common.xcontent.XContentParserUtils.parseTypedKeysObject; /** * Represents a set of {@link Aggregation}s @@ -133,7 +136,15 @@ public class Aggregations implements Iterable, ToXContent { XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.START_OBJECT) { - aggregations.add(XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class)); + SetOnce typedAgg = new SetOnce<>(); + String currentField = parser.currentName(); + parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class, typedAgg::set); + if (typedAgg.get() != null) { + aggregations.add(typedAgg.get()); + } else { + throw new ParsingException(parser.getTokenLocation(), + String.format(Locale.ROOT, "Could not parse aggregation keyed as [%s]", currentField)); + } } } return new Aggregations(aggregations); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/ParsedMultiBucketAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/ParsedMultiBucketAggregation.java index df80ada8ddd..1e601cb30fe 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/ParsedMultiBucketAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/ParsedMultiBucketAggregation.java @@ -171,7 +171,8 @@ public abstract class ParsedMultiBucketAggregation B parseSignificantTermsBucketXContent(final XContentParser parser, final B bucket, @@ -179,7 +180,8 @@ public abstract class ParsedSignificantTerms extends ParsedMultiBucketAggregatio bucket.supersetDf = parser.longValue(); } } else if (token == XContentParser.Token.START_OBJECT) { - aggregations.add(XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class)); + XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class, + aggregations::add); } } bucket.setAggregations(new Aggregations(aggregations)); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedTerms.java index 821bb000e33..6aa8bd16246 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedTerms.java +++ 
b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/ParsedTerms.java @@ -136,7 +136,8 @@ public abstract class ParsedTerms extends ParsedMultiBucketAggregation pipelineAggregators, + public InternalSum(String name, double sum, DocValueFormat formatter, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.sum = sum; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java index d1986f2dd25..e2cb84f03ad 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java @@ -191,7 +191,7 @@ public class InternalTopHits extends InternalAggregation implements TopHits { protected int doHashCode() { int hashCode = from; hashCode = 31 * hashCode + size; - hashCode = 31 * hashCode + topDocs.totalHits; + hashCode = 31 * hashCode + Long.hashCode(topDocs.totalHits); for (int d = 0; d < topDocs.scoreDocs.length; d++) { ScoreDoc doc = topDocs.scoreDocs[d]; hashCode = 31 * hashCode + doc.doc; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java index 5fca34beff2..5a69be8108a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java @@ -174,7 +174,7 @@ public abstract class ValuesSource { @Override public boolean needsScores() { - return script.needsScores(); + return script.needs_score(); } } @@ -246,7 +246,7 @@ public abstract class ValuesSource { @Override public boolean needsScores() { - return script.needsScores(); + return script.needs_score(); } @Override @@ -387,7 +387,7 @@ public abstract class ValuesSource { @Override public boolean needsScores() { - return script.needsScores(); + return script.needs_score(); } } @@ -406,7 +406,7 @@ public abstract class ValuesSource { @Override public boolean needsScores() { - return script.needsScores(); + return script.needs_score(); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index e5bfcfa6df5..e28bd505d37 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -29,6 +29,7 @@ import org.apache.lucene.util.BitSet; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.text.Text; @@ -42,11 +43,10 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHitField; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhase; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.InnerHitsFetchSubPhase; -import 
org.elasticsearch.search.SearchHits; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SourceLookup; import org.elasticsearch.tasks.TaskCancelledException; @@ -186,11 +186,11 @@ public class FetchPhase implements SearchPhase { loadStoredFields(context, subReaderContext, fieldsVisitor, subDocId); fieldsVisitor.postProcess(context.mapperService()); - Map searchFields = null; + Map searchFields = null; if (!fieldsVisitor.fields().isEmpty()) { searchFields = new HashMap<>(fieldsVisitor.fields().size()); for (Map.Entry> entry : fieldsVisitor.fields().entrySet()) { - searchFields.put(entry.getKey(), new SearchHitField(entry.getKey(), entry.getValue())); + searchFields.put(entry.getKey(), new DocumentField(entry.getKey(), entry.getValue())); } } @@ -219,7 +219,7 @@ public class FetchPhase implements SearchPhase { loadStoredFields(context, subReaderContext, rootFieldsVisitor, rootSubDocId); rootFieldsVisitor.postProcess(context.mapperService()); - Map searchFields = getSearchFields(context, nestedSubDocId, fieldNames, fieldNamePatterns, subReaderContext); + Map searchFields = getSearchFields(context, nestedSubDocId, fieldNames, fieldNamePatterns, subReaderContext); DocumentMapper documentMapper = context.mapperService().documentMapper(rootFieldsVisitor.uid().type()); SourceLookup sourceLookup = context.lookup().source(); sourceLookup.setSegmentAndDocument(subReaderContext, nestedSubDocId); @@ -272,8 +272,8 @@ public class FetchPhase implements SearchPhase { return new SearchHit(nestedTopDocId, rootFieldsVisitor.uid().id(), documentMapper.typeText(), nestedIdentity, searchFields); } - private Map getSearchFields(SearchContext context, int nestedSubDocId, Set fieldNames, List fieldNamePatterns, LeafReaderContext subReaderContext) { - Map searchFields = null; + private Map getSearchFields(SearchContext context, int nestedSubDocId, Set fieldNames, List fieldNamePatterns, LeafReaderContext subReaderContext) { + Map searchFields = null; if (context.hasStoredFields() && !context.storedFieldsContext().fieldNames().isEmpty()) { FieldsVisitor nestedFieldsVisitor = new CustomFieldsVisitor(fieldNames == null ? Collections.emptySet() : fieldNames, fieldNamePatterns == null ? 
Collections.emptyList() : fieldNamePatterns, false); @@ -283,7 +283,7 @@ public class FetchPhase implements SearchPhase { if (!nestedFieldsVisitor.fields().isEmpty()) { searchFields = new HashMap<>(nestedFieldsVisitor.fields().size()); for (Map.Entry> entry : nestedFieldsVisitor.fields().entrySet()) { - searchFields.put(entry.getKey(), new SearchHitField(entry.getKey(), entry.getValue())); + searchFields.put(entry.getKey(), new DocumentField(entry.getKey(), entry.getValue())); } } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java index 42cee23d390..cd0f1645ce0 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.search.fetch.subphase; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.index.fielddata.AtomicFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.SearchContext; @@ -55,9 +55,9 @@ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase { if (hitContext.hit().fieldsOrNull() == null) { hitContext.hit().fields(new HashMap<>(2)); } - SearchHitField hitField = hitContext.hit().getFields().get(field); + DocumentField hitField = hitContext.hit().getFields().get(field); if (hitField == null) { - hitField = new SearchHitField(field, new ArrayList<>(2)); + hitField = new DocumentField(field, new ArrayList<>(2)); hitContext.hit().getFields().put(field, hitField); } MappedFieldType fieldType = context.mapperService().fullName(field); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java index 0ffef32e427..93fc9ccb1f6 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java @@ -23,8 +23,8 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.index.mapper.ParentFieldMapper; -import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.SearchContext; @@ -51,12 +51,12 @@ public final class ParentFieldSubFetchPhase implements FetchSubPhase { return; } - Map fields = hitContext.hit().fieldsOrNull(); + Map fields = hitContext.hit().fieldsOrNull(); if (fields == null) { fields = new HashMap<>(); hitContext.hit().fields(fields); } - fields.put(ParentFieldMapper.NAME, new SearchHitField(ParentFieldMapper.NAME, Collections.singletonList(parentId))); + fields.put(ParentFieldMapper.NAME, new DocumentField(ParentFieldMapper.NAME, Collections.singletonList(parentId))); } public static String getParentId(ParentFieldMapper fieldMapper, LeafReader reader, int docId) { diff --git 
a/core/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsFetchSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsFetchSubPhase.java index 61c1c802de8..82e0725ae1d 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsFetchSubPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/ScriptFieldsFetchSubPhase.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.search.fetch.subphase; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.script.SearchScript; -import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.SearchContext; @@ -62,7 +62,7 @@ public final class ScriptFieldsFetchSubPhase implements FetchSubPhase { hitContext.hit().fields(new HashMap<>(2)); } - SearchHitField hitField = hitContext.hit().getFields().get(scriptField.name()); + DocumentField hitField = hitContext.hit().getFields().get(scriptField.name()); if (hitField == null) { final List values; if (value instanceof Collection) { @@ -71,7 +71,7 @@ public final class ScriptFieldsFetchSubPhase implements FetchSubPhase { } else { values = Collections.singletonList(value); } - hitField = new SearchHitField(scriptField.name(), values); + hitField = new DocumentField(scriptField.name(), values); hitContext.hit().getFields().put(scriptField.name(), hitField); } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java index c08eea2e588..22895807af6 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java @@ -87,29 +87,6 @@ public class FastVectorHighlighter implements Highlighter { HighlighterEntry cache = (HighlighterEntry) hitContext.cache().get(CACHE_KEY); try { - FieldQuery fieldQuery; - if (field.fieldOptions().requireFieldMatch()) { - if (cache.fieldMatchFieldQuery == null) { - /* - * we use top level reader to rewrite the query against all readers, - * with use caching it across hits (and across readers...) - */ - cache.fieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query, - hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch()); - } - fieldQuery = cache.fieldMatchFieldQuery; - } else { - if (cache.noFieldMatchFieldQuery == null) { - /* - * we use top level reader to rewrite the query against all readers, - * with use caching it across hits (and across readers...) - */ - cache.noFieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query, - hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch()); - } - fieldQuery = cache.noFieldMatchFieldQuery; - } - MapperHighlightEntry entry = cache.mappers.get(mapper); if (entry == null) { FragListBuilder fragListBuilder; @@ -151,6 +128,21 @@ public class FastVectorHighlighter implements Highlighter { } fragmentsBuilder.setDiscreteMultiValueHighlighting(termVectorMultiValue); entry = new MapperHighlightEntry(); + if (field.fieldOptions().requireFieldMatch()) { + /** + * we use top level reader to rewrite the query against all readers, + * with use caching it across hits (and across readers...) 
+ */ + entry.fieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query, + hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch()); + } else { + /** + * we use top level reader to rewrite the query against all readers, + * with use caching it across hits (and across readers...) + */ + entry.noFieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.query, + hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch()); + } entry.fragListBuilder = fragListBuilder; entry.fragmentsBuilder = fragmentsBuilder; if (cache.fvh == null) { @@ -162,6 +154,12 @@ public class FastVectorHighlighter implements Highlighter { CustomFieldQuery.highlightFilters.set(field.fieldOptions().highlightFilter()); cache.mappers.put(mapper, entry); } + final FieldQuery fieldQuery; + if (field.fieldOptions().requireFieldMatch()) { + fieldQuery = entry.fieldMatchFieldQuery; + } else { + fieldQuery = entry.noFieldMatchFieldQuery; + } cache.fvh.setPhraseLimit(field.fieldOptions().phraseLimit()); String[] fragments; @@ -249,12 +247,12 @@ public class FastVectorHighlighter implements Highlighter { private class MapperHighlightEntry { public FragListBuilder fragListBuilder; public FragmentsBuilder fragmentsBuilder; + public FieldQuery noFieldMatchFieldQuery; + public FieldQuery fieldMatchFieldQuery; } private class HighlighterEntry { public org.apache.lucene.search.vectorhighlight.FastVectorHighlighter fvh; - public FieldQuery noFieldMatchFieldQuery; - public FieldQuery fieldMatchFieldQuery; public Map mappers = new HashMap<>(); } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java index 684c7ddbddd..64f9b6365b3 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.highlight.Encoder; import org.apache.lucene.search.uhighlight.Snippet; import org.apache.lucene.search.uhighlight.BoundedBreakIteratorScanner; import org.apache.lucene.search.uhighlight.CustomPassageFormatter; +import org.apache.lucene.search.uhighlight.CustomSeparatorBreakIterator; import org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CollectionUtil; @@ -96,9 +97,7 @@ public class UnifiedHighlighter implements Highlighter { // breaks the text on, so we don't lose the distinction between the different values of a field and we // get back a snippet per value String fieldValue = mergeFieldValues(fieldValues, MULTIVAL_SEP_CHAR); - org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator breakIterator = - new org.apache.lucene.search.postingshighlight - .CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); + CustomSeparatorBreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); highlighter = new CustomUnifiedHighlighter(searcher, analyzer, mapperHighlighterEntry.passageFormatter, field.fieldOptions().boundaryScannerLocale(), breakIterator, fieldValue, diff --git a/core/src/main/java/org/elasticsearch/search/internal/ScrollContext.java b/core/src/main/java/org/elasticsearch/search/internal/ScrollContext.java index 163dbcc73d9..75d48d5d637 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ScrollContext.java +++ 
b/core/src/main/java/org/elasticsearch/search/internal/ScrollContext.java @@ -30,7 +30,7 @@ public final class ScrollContext { private Map context = null; - public int totalHits = -1; + public long totalHits = -1; public float maxScore; public ScoreDoc lastEmittedDoc; public Scroll scroll; diff --git a/core/src/main/java/org/elasticsearch/search/profile/query/ProfileWeight.java b/core/src/main/java/org/elasticsearch/search/profile/query/ProfileWeight.java index 4361267bfe6..7cb50b29219 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/query/ProfileWeight.java +++ b/core/src/main/java/org/elasticsearch/search/profile/query/ProfileWeight.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.BulkScorer; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.elasticsearch.search.profile.Timer; @@ -49,19 +50,50 @@ public final class ProfileWeight extends Weight { @Override public Scorer scorer(LeafReaderContext context) throws IOException { + ScorerSupplier supplier = scorerSupplier(context); + if (supplier == null) { + return null; + } + return supplier.get(false); + } + + @Override + public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException { Timer timer = profile.getTimer(QueryTimingType.BUILD_SCORER); timer.start(); - final Scorer subQueryScorer; + final ScorerSupplier subQueryScorerSupplier; try { - subQueryScorer = subQueryWeight.scorer(context); + subQueryScorerSupplier = subQueryWeight.scorerSupplier(context); } finally { timer.stop(); } - if (subQueryScorer == null) { + if (subQueryScorerSupplier == null) { return null; } - return new ProfileScorer(this, subQueryScorer, profile); + final ProfileWeight weight = this; + return new ScorerSupplier() { + + @Override + public Scorer get(boolean randomAccess) throws IOException { + timer.start(); + try { + return new ProfileScorer(weight, subQueryScorerSupplier.get(randomAccess), profile); + } finally { + timer.stop(); + } + } + + @Override + public long cost() { + timer.start(); + try { + return subQueryScorerSupplier.cost(); + } finally { + timer.stop(); + } + } + }; } @Override diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index f071c62f12c..8549f42040f 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -56,7 +56,7 @@ public final class QuerySearchResult extends SearchPhaseResult { private ProfileShardResult profileShardResults; private boolean hasProfileResults; private boolean hasScoreDocs; - private int totalHits; + private long totalHits; private float maxScore; public QuerySearchResult() { @@ -317,7 +317,7 @@ public final class QuerySearchResult extends SearchPhaseResult { out.writeOptionalWriteable(profileShardResults); } - public int getTotalHits() { + public long getTotalHits() { return totalHits; } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java index c2f20f1a794..f55554a4457 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java @@ -19,8 +19,10 @@ package org.elasticsearch.search.suggest; import 
org.apache.lucene.util.CollectionUtil; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -48,6 +50,7 @@ import java.util.Comparator; import java.util.HashMap; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.stream.Collectors; @@ -177,7 +180,16 @@ public class Suggest implements Iterable>> suggestions = new ArrayList<>(); while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) { - suggestions.add(Suggestion.fromXContent(parser)); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation); + String currentField = parser.currentName(); + ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation); + Suggestion> suggestion = Suggestion.fromXContent(parser); + if (suggestion != null) { + suggestions.add(suggestion); + } else { + throw new ParsingException(parser.getTokenLocation(), + String.format(Locale.ROOT, "Could not parse suggestion keyed as [%s]", currentField)); + } } return new Suggest(suggestions); } @@ -386,14 +398,16 @@ public class Suggest implements Iterable> fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation); - return XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Suggestion.class); + ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.currentToken(), parser::getTokenLocation); + SetOnce suggestion = new SetOnce<>(); + XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Suggestion.class, suggestion::set); + return suggestion.get(); } protected static > void parseEntries(XContentParser parser, Suggestion suggestion, CheckedFunction entryParser) throws IOException { - ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation); + ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.currentToken(), parser::getTokenLocation); while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { suggestion.addTerm(entryParser.apply(parser)); } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionFieldStats.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionFieldStats.java index 8b5761a7e9a..c9b8356362c 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionFieldStats.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionFieldStats.java @@ -20,7 +20,8 @@ package org.elasticsearch.search.suggest.completion; import com.carrotsearch.hppc.ObjectLongHashMap; -import org.apache.lucene.index.Fields; + +import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; @@ -49,14 +50,13 @@ public class CompletionFieldStats { for (LeafReaderContext atomicReaderContext : indexReader.leaves()) { LeafReader atomicReader = atomicReaderContext.reader(); try { - Fields fields = atomicReader.fields(); - for (String fieldName : fields) { - Terms terms = fields.terms(fieldName); + for (FieldInfo 
info : atomicReader.getFieldInfos()) { + Terms terms = atomicReader.terms(info.name); if (terms instanceof CompletionTerms) { // TODO: currently we load up the suggester for reporting its size long fstSize = ((CompletionTerms) terms).suggester().ramBytesUsed(); - if (fieldNamePatterns != null && fieldNamePatterns.length > 0 && Regex.simpleMatch(fieldNamePatterns, fieldName)) { - completionFields.addTo(fieldName, fstSize); + if (fieldNamePatterns != null && fieldNamePatterns.length > 0 && Regex.simpleMatch(fieldNamePatterns, info.name)) { + completionFields.addTo(info.name, fstSize); } sizeInBytes += fstSize; } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index a54e72159f8..de0a52ed0e4 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -21,6 +21,7 @@ package org.elasticsearch.snapshots; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -346,6 +347,7 @@ public final class SnapshotInfo implements Comparable, ToXContent, return toXContentSnapshot(builder, params); } + final boolean verbose = params.paramAsBoolean("verbose", GetSnapshotsRequest.DEFAULT_VERBOSE_MODE); // write snapshot info for the API and any other situations builder.startObject(); builder.field(SNAPSHOT, snapshotId.getName()); @@ -359,22 +361,22 @@ public final class SnapshotInfo implements Comparable, ToXContent, builder.value(index); } builder.endArray(); - if (state != null) { + if (verbose || state != null) { builder.field(STATE, state); } if (reason != null) { builder.field(REASON, reason); } - if (startTime != 0) { + if (verbose || startTime != 0) { builder.field(START_TIME, DATE_TIME_FORMATTER.printer().print(startTime)); builder.field(START_TIME_IN_MILLIS, startTime); } - if (endTime != 0) { + if (verbose || endTime != 0) { builder.field(END_TIME, DATE_TIME_FORMATTER.printer().print(endTime)); builder.field(END_TIME_IN_MILLIS, endTime); builder.timeValueField(DURATION_IN_MILLIS, DURATION, endTime - startTime); } - if (!shardFailures.isEmpty()) { + if (verbose || !shardFailures.isEmpty()) { builder.startArray(FAILURES); for (SnapshotShardFailure shardFailure : shardFailures) { builder.startObject(); @@ -383,7 +385,7 @@ public final class SnapshotInfo implements Comparable, ToXContent, } builder.endArray(); } - if (totalShards != 0) { + if (verbose || totalShards != 0) { builder.startObject(SHARDS); builder.field(TOTAL, totalShards); builder.field(FAILED, failedShards()); diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 03c7eb3a4af..c8dfb773281 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -1180,7 +1180,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus @Override public void onSnapshotCompletion(Snapshot completedSnapshot, SnapshotInfo snapshotInfo) { if (completedSnapshot.equals(snapshot)) { - logger.trace("deleted snapshot completed - 
deleting files"); + logger.debug("deleted snapshot completed - deleting files"); removeListener(this); threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> deleteSnapshot(completedSnapshot.getRepository(), completedSnapshot.getSnapshotId().getName(), @@ -1214,7 +1214,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus } }); } else { - logger.trace("deleted snapshot is not running - deleting files"); + logger.debug("deleted snapshot is not running - deleting files"); deleteSnapshotFromRepository(snapshot, listener, repositoryStateId); } } diff --git a/core/src/main/java/org/elasticsearch/tasks/CancellableTask.java b/core/src/main/java/org/elasticsearch/tasks/CancellableTask.java index b3c1a8929a6..685e9bcf352 100644 --- a/core/src/main/java/org/elasticsearch/tasks/CancellableTask.java +++ b/core/src/main/java/org/elasticsearch/tasks/CancellableTask.java @@ -52,7 +52,7 @@ public abstract class CancellableTask extends Task { } /** - * Returns true if this task should can potentially have children that needs to be cancelled when the parent is cancelled. + * Returns true if this task can potentially have children that need to be cancelled when it parent is cancelled. */ public abstract boolean shouldCancelChildrenOnCancellation(); diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index b61da9c27f1..e7efe3ba3be 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -54,6 +54,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; +import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.RejectedExecutionHandler; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; @@ -298,16 +299,24 @@ public class ThreadPool extends AbstractComponent implements Closeable { } /** - * Get the generic executor service. This executor service {@link Executor#execute(Runnable)} method will run the {@link Runnable} it - * is given in the {@link ThreadContext} of the thread that queues it. + * Get the generic {@link ExecutorService}. This executor service + * {@link Executor#execute(Runnable)} method will run the {@link Runnable} it is given in the + * {@link ThreadContext} of the thread that queues it. + *

+ * Warning: this {@linkplain ExecutorService} will not throw {@link RejectedExecutionException} + * if you submit a task while it is shut down. It will instead silently queue it and not run it. */ public ExecutorService generic() { return executor(Names.GENERIC); } /** - * Get the executor service with the given name. This executor service's {@link Executor#execute(Runnable)} method will run the - * {@link Runnable} it is given in the {@link ThreadContext} of the thread that queues it. + * Get the {@link ExecutorService} with the given name. This executor service's + * {@link Executor#execute(Runnable)} method will run the {@link Runnable} it is given in the + * {@link ThreadContext} of the thread that queues it. + *

+ * Warning: this {@linkplain ExecutorService} might not throw {@link RejectedExecutionException} + * if you submit a task while it is shut down. It will instead silently queue it and not run it. * * @param name the name of the executor service to obtain * @throws IllegalArgumentException if no executor service with the specified name exists diff --git a/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 59da9bee7ef..af8ecdbf535 100644 --- a/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -33,7 +33,6 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractComponent; @@ -56,7 +55,6 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.Semaphore; @@ -64,7 +62,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; -import java.util.stream.Stream; /** * Represents a connection to a single remote cluster. 
In contrast to a local cluster a remote cluster is not joined such that the @@ -83,8 +80,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo private final TransportService transportService; private final ConnectionProfile remoteProfile; - private final Set connectedNodes = Collections.newSetFromMap(new ConcurrentHashMap<>()); - private final Supplier nodeSupplier; + private final ConnectedNodes connectedNodes; private final String clusterAlias; private final int maxNumRemoteConnections; private final Predicate nodePredicate; @@ -116,19 +112,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo TransportRequestOptions.Type.STATE, TransportRequestOptions.Type.RECOVERY); remoteProfile = builder.build(); - nodeSupplier = new Supplier() { - private volatile Iterator current; - @Override - public DiscoveryNode get() { - if (current == null || current.hasNext() == false) { - current = connectedNodes.iterator(); - if (current.hasNext() == false) { - throw new IllegalStateException("No node available for cluster: " + clusterAlias + " nodes: " + connectedNodes); - } - } - return current.next(); - } - }; + connectedNodes = new ConnectedNodes(clusterAlias); this.seedNodes = Collections.unmodifiableList(seedNodes); this.connectHandler = new ConnectHandler(); transportService.addConnectionListener(this); @@ -156,7 +140,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo */ public void fetchSearchShards(ClusterSearchShardsRequest searchRequest, ActionListener listener) { - if (connectedNodes.isEmpty()) { + if (connectedNodes.size() == 0) { // just in case if we are not connected for some reason we try to connect and if we fail we have to notify the listener // this will cause some back pressure on the search end and eventually will cause rejections but that's fine // we can't proceed with a search on a cluster level. @@ -173,7 +157,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo * will invoke the listener immediately. 
*/ public void ensureConnected(ActionListener voidActionListener) { - if (connectedNodes.isEmpty()) { + if (connectedNodes.size() == 0) { connectHandler.connect(voidActionListener); } else { voidActionListener.onResponse(null); @@ -182,7 +166,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo private void fetchShardsInternal(ClusterSearchShardsRequest searchShardsRequest, final ActionListener listener) { - final DiscoveryNode node = nodeSupplier.get(); + final DiscoveryNode node = connectedNodes.get(); transportService.sendRequest(node, ClusterSearchShardsAction.NAME, searchShardsRequest, new TransportResponseHandler() { @@ -218,7 +202,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo request.clear(); request.nodes(true); request.local(true); // run this on the node that gets the request it's as good as any other - final DiscoveryNode node = nodeSupplier.get(); + final DiscoveryNode node = connectedNodes.get(); transportService.sendRequest(node, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, new TransportResponseHandler() { @Override @@ -243,7 +227,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo } }); }; - if (connectedNodes.isEmpty()) { + if (connectedNodes.size() == 0) { // just in case if we are not connected for some reason we try to connect and if we fail we have to notify the listener // this will cause some back pressure on the search end and eventually will cause rejections but that's fine // we can't proceed with a search on a cluster level. @@ -260,7 +244,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo * given node. */ Transport.Connection getConnection(DiscoveryNode remoteClusterNode) { - DiscoveryNode discoveryNode = nodeSupplier.get(); + DiscoveryNode discoveryNode = connectedNodes.get(); Transport.Connection connection = transportService.getConnection(discoveryNode); return new Transport.Connection() { @Override @@ -283,12 +267,11 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo } Transport.Connection getConnection() { - DiscoveryNode discoveryNode = nodeSupplier.get(); + DiscoveryNode discoveryNode = connectedNodes.get(); return transportService.getConnection(discoveryNode); } - - @Override + @Override public void close() throws IOException { connectHandler.close(); } @@ -583,12 +566,19 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo return connectedNodes.contains(node); } + DiscoveryNode getConnectedNode() { + return connectedNodes.get(); + } + + void addConnectedNode(DiscoveryNode node) { + connectedNodes.add(node); + } /** * Fetches connection info for this connection */ public void getConnectionInfo(ActionListener listener) { - final Optional anyNode = connectedNodes.stream().findAny(); + final Optional anyNode = connectedNodes.getAny(); if (anyNode.isPresent() == false) { // not connected we return immediately RemoteConnectionInfo remoteConnectionStats = new RemoteConnectionInfo(clusterAlias, @@ -650,4 +640,68 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo int getNumNodesConnected() { return connectedNodes.size(); } + + private static class ConnectedNodes implements Supplier { + + private final Set nodeSet = new HashSet<>(); + private final String clusterAlias; + + private Iterator currentIterator = null; + + private ConnectedNodes(String clusterAlias) { + this.clusterAlias = clusterAlias; + } + + 
@Override + public synchronized DiscoveryNode get() { + ensureIteratorAvailable(); + if (currentIterator.hasNext()) { + return currentIterator.next(); + } else { + throw new IllegalStateException("No node available for cluster: " + clusterAlias); + } + } + + synchronized boolean remove(DiscoveryNode node) { + final boolean setRemoval = nodeSet.remove(node); + if (setRemoval) { + currentIterator = null; + } + return setRemoval; + } + + synchronized boolean add(DiscoveryNode node) { + final boolean added = nodeSet.add(node); + if (added) { + currentIterator = null; + } + return added; + } + + synchronized int size() { + return nodeSet.size(); + } + + synchronized boolean contains(DiscoveryNode node) { + return nodeSet.contains(node); + } + + synchronized Optional getAny() { + ensureIteratorAvailable(); + if (currentIterator.hasNext()) { + return Optional.of(currentIterator.next()); + } else { + return Optional.empty(); + } + } + + private synchronized void ensureIteratorAvailable() { + if (currentIterator == null) { + currentIterator = nodeSet.iterator(); + } else if (currentIterator.hasNext() == false && nodeSet.isEmpty() == false) { + // iterator rollover + currentIterator = nodeSet.iterator(); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index 22aced389f8..9631fc977c9 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -50,6 +50,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkUtils; @@ -86,6 +87,7 @@ import java.util.Collections; import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -112,7 +114,6 @@ import static org.elasticsearch.common.settings.Setting.timeSetting; import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException; import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; -import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentSet; public abstract class TcpTransport extends AbstractLifecycleComponent implements Transport { @@ -160,7 +161,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i protected volatile TransportServiceAdapter transportServiceAdapter; // node id to actual channel protected final ConcurrentMap connectedNodes = newConcurrentMap(); - private final Set openConnections = newConcurrentSet(); protected final Map> serverChannels = newConcurrentMap(); protected final ConcurrentMap profileBoundAddresses = newConcurrentMap(); @@ -170,7 +170,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i // this lock is here to make sure we close this transport and disconnect all the client nodes // connections while no connect operations is going on... (this might help with 100% CPU when stopping the transport?) 
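(Editorial aside, not part of the patch: the ConnectedNodes class introduced above replaces the inline Supplier with a synchronized, round-robin view over the connected nodes, rebuilding its iterator whenever membership changes or the previous iteration is exhausted. Below is a minimal, self-contained sketch of that pattern with hypothetical names; it is illustrative only and claims nothing about the actual Elasticsearch implementation beyond what the hunk above shows.)

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.function.Supplier;

// Minimal sketch: round-robin selection over a mutable set. The iterator is discarded
// whenever the set changes, and rolled over to a fresh one once it runs out of elements.
final class RoundRobinNodes<T> implements Supplier<T> {
    private final Set<T> nodes = new HashSet<>();
    private Iterator<T> current = null;

    synchronized boolean add(T node) {
        final boolean added = nodes.add(node);
        if (added) {
            current = null; // invalidate so the next get() also sees the new member
        }
        return added;
    }

    synchronized boolean remove(T node) {
        final boolean removed = nodes.remove(node);
        if (removed) {
            current = null; // never hand out a node that was just removed
        }
        return removed;
    }

    @Override
    public synchronized T get() {
        if (current == null || (current.hasNext() == false && nodes.isEmpty() == false)) {
            current = nodes.iterator(); // roll over to a fresh iteration
        }
        if (current.hasNext()) {
            return current.next();
        }
        throw new IllegalStateException("no nodes available");
    }
}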
- protected final ReadWriteLock globalLock = new ReentrantReadWriteLock(); + protected final ReadWriteLock closeLock = new ReentrantReadWriteLock(); protected final boolean compress; protected volatile BoundTransportAddress boundAddress; private final String transportName; @@ -181,6 +181,9 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i private final CounterMetric numHandshakes = new CounterMetric(); private static final String HANDSHAKE_ACTION_NAME = "internal:tcp/handshake"; + private final MeanMetric readBytesMetric = new MeanMetric(); + private final MeanMetric transmittedBytesMetric = new MeanMetric(); + public TcpTransport(String transportName, Settings settings, ThreadPool threadPool, BigArrays bigArrays, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) { @@ -300,14 +303,14 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i DiscoveryNode node = entry.getKey(); NodeChannels channels = entry.getValue(); for (Channel channel : channels.getChannels()) { - internalSendMessage(channel, pingHeader, new NotifyOnceListener() { + internalSendMessage(channel, pingHeader, new SendMetricListener(pingHeader.length()) { @Override - public void innerOnResponse(Channel channel) { + protected void innerInnerOnResponse(Channel channel) { successfulPings.inc(); } @Override - public void innerOnFailure(Exception e) { + protected void innerOnFailure(Exception e) { if (isOpen(channel)) { logger.debug( (Supplier) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); @@ -386,15 +389,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i return version; } - public boolean hasChannel(Channel channel) { - for (Channel channel1 : channels) { - if (channel.equals(channel1)) { - return true; - } - } - return false; - } - public List getChannels() { return Arrays.asList(channels); } @@ -408,12 +402,12 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } @Override - public synchronized void close() throws IOException { + public void close() throws IOException { if (closed.compareAndSet(false, true)) { try { closeChannels(Arrays.stream(channels).filter(Objects::nonNull).collect(Collectors.toList())); } finally { - onNodeChannelsClosed(this); + transportServiceAdapter.onConnectionClosed(this); } } } @@ -432,6 +426,10 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i Channel channel = channel(options.type()); sendRequestToChannel(this.node, channel, requestId, action, request, options, getVersion(), (byte) 0); } + + boolean isClosed() { + return closed.get(); + } } @Override @@ -447,7 +445,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i if (node == null) { throw new ConnectTransportException(null, "can't connect to a null node"); } - globalLock.readLock().lock(); // ensure we don't open connections while we are closing + closeLock.readLock().lock(); // ensure we don't open connections while we are closing try { ensureOpen(); try (Releasable ignored = connectionLock.acquire(node.getId())) { @@ -464,7 +462,24 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i if (logger.isDebugEnabled()) { logger.debug("connected to node [{}]", node); } - transportServiceAdapter.onNodeConnected(node); + try { + transportServiceAdapter.onNodeConnected(node); + } finally { + if (nodeChannels.isClosed()) { + // we got closed concurrently due to a disconnect or 
some other event on the channel. + // the close callback will close the NodeChannel instance first and then try to remove + // the connection from the connected nodes. It will NOT acquire the connectionLock for + // the node to prevent any blocking calls on network threads. Yet, we still establish a happens + // before relationship to the connectedNodes.put since we check if we can remove the + // (DiscoveryNode, NodeChannels) tuple from the map after we closed. Here we check if it's closed an if so we + // try to remove it first either way one of the two wins even if the callback has run before we even added the + // tuple to the map since in that case we remove it here again + if (connectedNodes.remove(node, nodeChannels)) { + transportServiceAdapter.onNodeDisconnected(node); + } + throw new NodeNotConnectedException(node, "connection concurrently closed"); + } + } success = true; } catch (ConnectTransportException e) { throw e; @@ -480,7 +495,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } } } finally { - globalLock.readLock().unlock(); + closeLock.readLock().unlock(); } } @@ -515,11 +530,12 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i boolean success = false; NodeChannels nodeChannels = null; connectionProfile = resolveConnectionProfile(connectionProfile, defaultConnectionProfile); - globalLock.readLock().lock(); // ensure we don't open connections while we are closing + closeLock.readLock().lock(); // ensure we don't open connections while we are closing try { ensureOpen(); try { - AtomicBoolean runOnce = new AtomicBoolean(false); + final AtomicBoolean runOnce = new AtomicBoolean(false); + final AtomicReference connectionRef = new AtomicReference<>(); Consumer onClose = c -> { assert isOpen(c) == false : "channel is still open when onClose is called"; try { @@ -528,7 +544,10 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i // we only need to disconnect from the nodes once since all other channels // will also try to run this we protect it from running multiple times. if (runOnce.compareAndSet(false, true)) { - disconnectFromNodeChannel(c, "channel closed"); + NodeChannels connection = connectionRef.get(); + if (connection != null) { + disconnectFromNodeCloseAndNotify(node, connection); + } } } }; @@ -542,7 +561,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i final Version version = executeHandshake(node, channel, handshakeTimeout); nodeChannels = new NodeChannels(nodeChannels, version); // clone the channels - we now have the correct version transportServiceAdapter.onConnectionOpened(nodeChannels); - openConnections.add(nodeChannels); + connectionRef.set(nodeChannels); success = true; return nodeChannels; } catch (ConnectTransportException e) { @@ -557,77 +576,38 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } } } finally { - globalLock.readLock().unlock(); + closeLock.readLock().unlock(); } } - /** - * Disconnects from a node, only if the relevant channel is found to be part of the node channels. 
- */ - protected boolean disconnectFromNode(DiscoveryNode node, Channel channel, String reason) { - // this might be called multiple times from all the node channels, so do a lightweight - // check outside of the lock - NodeChannels nodeChannels = connectedNodes.get(node); - if (nodeChannels != null && nodeChannels.hasChannel(channel)) { - try (Releasable ignored = connectionLock.acquire(node.getId())) { - nodeChannels = connectedNodes.get(node); - // check again within the connection lock, if its still applicable to remove it - if (nodeChannels != null && nodeChannels.hasChannel(channel)) { - connectedNodes.remove(node); - closeAndNotify(node, nodeChannels, reason); - return true; - } - } - } - return false; - } - - private void closeAndNotify(DiscoveryNode node, NodeChannels nodeChannels, String reason) { + private void disconnectFromNodeCloseAndNotify(DiscoveryNode node, NodeChannels nodeChannels) { + assert nodeChannels != null : "nodeChannels must not be null"; try { - logger.debug("disconnecting from [{}], {}", node, reason); IOUtils.closeWhileHandlingException(nodeChannels); } finally { - logger.trace("disconnected from [{}], {}", node, reason); - transportServiceAdapter.onNodeDisconnected(node); + if (closeLock.readLock().tryLock()) { + try { + if (connectedNodes.remove(node, nodeChannels)) { + transportServiceAdapter.onNodeDisconnected(node); + } + } finally { + closeLock.readLock().unlock(); + } + } } } /** * Disconnects from a node if a channel is found as part of that nodes channels. */ - protected final void disconnectFromNodeChannel(final Channel channel, final String reason) { - threadPool.generic().execute(() -> { + protected final void closeChannelWhileHandlingExceptions(final Channel channel) { + if (isOpen(channel)) { try { - if (isOpen(channel)) { - closeChannels(Collections.singletonList(channel)); - } + closeChannels(Collections.singletonList(channel)); } catch (IOException e) { logger.warn("failed to close channel", e); - } finally { - outer: - { - for (Map.Entry entry : connectedNodes.entrySet()) { - if (disconnectFromNode(entry.getKey(), channel, reason)) { - // if we managed to find this channel and disconnect from it, then break, no need to check on - // the rest of the nodes - // #onNodeChannelsClosed will remove it.. - assert openConnections.contains(entry.getValue()) == false : "NodeChannel#close should remove the connetion"; - // we can only be connected and published to a single node with one connection. 
So if disconnectFromNode - // returns true we can safely break out from here since we cleaned up everything needed - break outer; - } - } - // now if we haven't found the right connection in the connected nodes we have to go through the open connections - // it might be that the channel belongs to a connection that is not published - for (NodeChannels channels : openConnections) { - if (channels.hasChannel(channel)) { - IOUtils.closeWhileHandlingException(channels); - break; - } - } - } } - }); + } } @Override @@ -641,10 +621,14 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i @Override public void disconnectFromNode(DiscoveryNode node) { + closeLock.readLock().lock(); + NodeChannels nodeChannels = null; try (Releasable ignored = connectionLock.acquire(node.getId())) { - NodeChannels nodeChannels = connectedNodes.remove(node); - if (nodeChannels != null) { - closeAndNotify(node, nodeChannels, "due to explicit disconnect call"); + nodeChannels = connectedNodes.remove(node); + } finally { + closeLock.readLock().unlock(); + if (nodeChannels != null) { // if we found it and removed it we close and notify + IOUtils.closeWhileHandlingException(nodeChannels, () -> transportServiceAdapter.onNodeDisconnected(node)); } } } @@ -693,8 +677,8 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i continue; } Settings mergedSettings = Settings.builder() - .put(defaultSettings) - .put(profileSettings) + .put(defaultSettings.getAsMap()) + .put(profileSettings.getAsMap()) .build(); result.put(name, mergedSettings); } @@ -917,7 +901,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i final CountDownLatch latch = new CountDownLatch(1); // make sure we run it on another thread than a possible IO handler thread threadPool.generic().execute(() -> { - globalLock.writeLock().lock(); + closeLock.writeLock().lock(); try { // first stop to accept any incoming connections so nobody can connect to this transport for (Map.Entry> entry : serverChannels.entrySet()) { @@ -931,12 +915,19 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } // we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close // all instances and then clear them maps - IOUtils.closeWhileHandlingException(Iterables.concat(connectedNodes.values(), openConnections)); - openConnections.clear(); - connectedNodes.clear(); + Iterator> iterator = connectedNodes.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry next = iterator.next(); + try { + IOUtils.closeWhileHandlingException(next.getValue()); + transportServiceAdapter.onNodeDisconnected(next.getKey()); + } finally { + iterator.remove(); + } + } stopInternal(); } finally { - globalLock.writeLock().unlock(); + closeLock.writeLock().unlock(); latch.countDown(); } }); @@ -950,10 +941,9 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } protected void onException(Channel channel, Exception e) { - String reason = ExceptionsHelper.detailedMessage(e); if (!lifecycle.started()) { // just close and ignore - we are already stopped and just need to make sure we release all resources - disconnectFromNodeChannel(channel, reason); + closeChannelWhileHandlingExceptions(channel); return; } @@ -964,15 +954,15 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i channel), e); // close the channel, which will cause a node to be disconnected if relevant - disconnectFromNodeChannel(channel, reason); + 
closeChannelWhileHandlingExceptions(channel); } else if (isConnectException(e)) { logger.trace((Supplier) () -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e); // close the channel as safe measure, which will cause a node to be disconnected if relevant - disconnectFromNodeChannel(channel, reason); + closeChannelWhileHandlingExceptions(channel); } else if (e instanceof BindException) { logger.trace((Supplier) () -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e); // close the channel as safe measure, which will cause a node to be disconnected if relevant - disconnectFromNodeChannel(channel, reason); + closeChannelWhileHandlingExceptions(channel); } else if (e instanceof CancelledKeyException) { logger.trace( (Supplier) () -> new ParameterizedMessage( @@ -980,13 +970,14 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i channel), e); // close the channel as safe measure, which will cause a node to be disconnected if relevant - disconnectFromNodeChannel(channel, reason); + closeChannelWhileHandlingExceptions(channel); } else if (e instanceof TcpTransport.HttpOnTransportException) { // in case we are able to return data, serialize the exception content and sent it back to the client if (isOpen(channel)) { - final NotifyOnceListener closeChannel = new NotifyOnceListener() { + BytesArray message = new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)); + final SendMetricListener closeChannel = new SendMetricListener(message.length()) { @Override - public void innerOnResponse(Channel channel) { + protected void innerInnerOnResponse(Channel channel) { try { closeChannels(Collections.singletonList(channel)); } catch (IOException e1) { @@ -995,7 +986,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } @Override - public void innerOnFailure(Exception e) { + protected void innerOnFailure(Exception e) { try { closeChannels(Collections.singletonList(channel)); } catch (IOException e1) { @@ -1004,13 +995,13 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } } }; - internalSendMessage(channel, new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)), closeChannel); + internalSendMessage(channel, message, closeChannel); } } else { logger.warn( (Supplier) () -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); // close the channel, which will cause a node to be disconnected if relevant - disconnectFromNodeChannel(channel, reason); + closeChannelWhileHandlingExceptions(channel); } } @@ -1086,7 +1077,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i final TransportRequestOptions finalOptions = options; // this might be called in a different thread SendListener onRequestSent = new SendListener(stream, - () -> transportServiceAdapter.onRequestSent(node, requestId, action, request, finalOptions)); + () -> transportServiceAdapter.onRequestSent(node, requestId, action, request, finalOptions), message.length()); internalSendMessage(targetChannel, message, onRequestSent); addedReleaseListener = true; } finally { @@ -1099,7 +1090,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i /** * sends a message to the given channel, using the given callbacks. 
*/ - private void internalSendMessage(Channel targetChannel, BytesReference message, NotifyOnceListener listener) { + private void internalSendMessage(Channel targetChannel, BytesReference message, SendMetricListener listener) { try { sendMessage(targetChannel, message, listener); } catch (Exception ex) { @@ -1131,9 +1122,10 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i status = TransportStatus.setError(status); final BytesReference bytes = stream.bytes(); final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length()); + CompositeBytesReference message = new CompositeBytesReference(header, bytes); SendListener onResponseSent = new SendListener(null, - () -> transportServiceAdapter.onResponseSent(requestId, action, error)); - internalSendMessage(channel, new CompositeBytesReference(header, bytes), onResponseSent); + () -> transportServiceAdapter.onResponseSent(requestId, action, error), message.length()); + internalSendMessage(channel, message, onResponseSent); } } @@ -1162,13 +1154,13 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } threadPool.getThreadContext().writeTo(stream); stream.setVersion(nodeVersion); - BytesReference reference = buildMessage(requestId, status, nodeVersion, response, stream); + BytesReference message = buildMessage(requestId, status, nodeVersion, response, stream); final TransportResponseOptions finalOptions = options; // this might be called in a different thread SendListener listener = new SendListener(stream, - () -> transportServiceAdapter.onResponseSent(requestId, action, response, finalOptions)); - internalSendMessage(channel, reference, listener); + () -> transportServiceAdapter.onResponseSent(requestId, action, response, finalOptions), message.length()); + internalSendMessage(channel, message, listener); addedReleaseListener = true; } finally { if (!addedReleaseListener) { @@ -1324,7 +1316,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i public final void messageReceived(BytesReference reference, Channel channel, String profileName, InetSocketAddress remoteAddress, int messageLengthBytes) throws IOException { final int totalMessageSize = messageLengthBytes + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; - transportServiceAdapter.addBytesReceived(totalMessageSize); + readBytesMetric.inc(totalMessageSize); // we have additional bytes to read, outside of the header boolean hasMessageBytesToRead = (totalMessageSize - TcpHeader.HEADER_SIZE) > 0; StreamInput streamIn = reference.streamInput(); @@ -1662,22 +1654,42 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } } - private final class SendListener extends NotifyOnceListener { + /** + * This listener increments the transmitted bytes metric on success. 
+ */ + private abstract class SendMetricListener extends NotifyOnceListener { + private final long messageSize; + + private SendMetricListener(long messageSize) { + this.messageSize = messageSize; + } + + @Override + protected final void innerOnResponse(T object) { + transmittedBytesMetric.inc(messageSize); + innerInnerOnResponse(object); + } + + protected abstract void innerInnerOnResponse(T object); + } + + private final class SendListener extends SendMetricListener { private final Releasable optionalReleasable; private final Runnable transportAdaptorCallback; - private SendListener(Releasable optionalReleasable, Runnable transportAdaptorCallback) { + private SendListener(Releasable optionalReleasable, Runnable transportAdaptorCallback, long messageLength) { + super(messageLength); this.optionalReleasable = optionalReleasable; this.transportAdaptorCallback = transportAdaptorCallback; } @Override - public void innerOnResponse(Channel channel) { + protected void innerInnerOnResponse(Channel channel) { release(); } @Override - public void innerOnFailure(Exception e) { + protected void innerOnFailure(Exception e) { release(); } @@ -1686,19 +1698,15 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } } - private void onNodeChannelsClosed(NodeChannels channels) { - // don't assert here since the channel / connection might not have been registered yet - final boolean remove = openConnections.remove(channels); - if (remove) { - transportServiceAdapter.onConnectionClosed(channels); - } - } + /** + * Returns count of currently open connections + */ + protected abstract long getNumOpenServerConnections(); - final int getNumOpenConnections() { - return openConnections.size(); - } - - final int getNumConnectedNodes() { - return connectedNodes.size(); + @Override + public final TransportStats getStats() { + return new TransportStats( + getNumOpenServerConnections(), readBytesMetric.count(), readBytesMetric.sum(), transmittedBytesMetric.count(), + transmittedBytesMetric.sum()); } } diff --git a/core/src/main/java/org/elasticsearch/transport/Transport.java b/core/src/main/java/org/elasticsearch/transport/Transport.java index a32289332ea..5d22e156d9d 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transport.java +++ b/core/src/main/java/org/elasticsearch/transport/Transport.java @@ -75,11 +75,6 @@ public interface Transport extends LifecycleComponent { */ void disconnectFromNode(DiscoveryNode node); - /** - * Returns count of currently open connections - */ - long serverOpen(); - List getLocalAddresses(); default CircuitBreaker getInFlightRequestBreaker() { @@ -110,6 +105,8 @@ public interface Transport extends LifecycleComponent { */ Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException; + TransportStats getStats(); + /** * A unidirectional connection to a {@link DiscoveryNode} */ diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index e5382e4e261..13034355366 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -203,8 +203,6 @@ public class TransportService extends AbstractLifecycleComponent { @Override protected void doStart() { - adapter.rxMetric.clear(); - adapter.txMetric.clear(); transport.transportServiceAdapter(adapter); transport.start(); @@ -292,8 +290,7 @@ public class TransportService extends 
AbstractLifecycleComponent { } public TransportStats stats() { - return new TransportStats( - transport.serverOpen(), adapter.rxMetric.count(), adapter.rxMetric.sum(), adapter.txMetric.count(), adapter.txMetric.sum()); + return transport.getStats(); } public BoundTransportAddress boundAddress() { @@ -527,6 +524,19 @@ public class TransportService extends AbstractLifecycleComponent { } } + public final void sendChildRequest(final DiscoveryNode node, final String action, + final TransportRequest request, final Task parentTask, + final TransportRequestOptions options, + final TransportResponseHandler handler) { + try { + Transport.Connection connection = getConnection(node); + sendChildRequest(connection, action, request, parentTask, options, handler); + } catch (NodeNotConnectedException ex) { + // the caller might not handle this so we invoke the handler + handler.handleException(ex); + } + } + public void sendChildRequest(final Transport.Connection connection, final String action, final TransportRequest request, final Task parentTask, final TransportResponseHandler handler) { @@ -738,19 +748,6 @@ public class TransportService extends AbstractLifecycleComponent { protected class Adapter implements TransportServiceAdapter { - final MeanMetric rxMetric = new MeanMetric(); - final MeanMetric txMetric = new MeanMetric(); - - @Override - public void addBytesReceived(long size) { - rxMetric.inc(size); - } - - @Override - public void addBytesSent(long size) { - txMetric.inc(size); - } - @Override public void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) { diff --git a/core/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java b/core/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java index 70748b01a68..24a71a99998 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportServiceAdapter.java @@ -23,10 +23,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; public interface TransportServiceAdapter extends TransportConnectionListener { - void addBytesReceived(long size); - - void addBytesSent(long size); - /** called by the {@link Transport} implementation once a request has been sent */ void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options); diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index 120cf3dbb3e..81ed347382b 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -49,6 +49,8 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.regex.Regex; @@ -66,6 +68,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.transport.TransportSettings; import java.io.IOException; +import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; import 
java.util.Collections; @@ -75,6 +78,7 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; @@ -214,8 +218,8 @@ public class TribeService extends AbstractLifecycleComponent { private final NamedWriteableRegistry namedWriteableRegistry; - public TribeService(Settings settings, ClusterService clusterService, final String tribeNodeId, - NamedWriteableRegistry namedWriteableRegistry, Function clientNodeBuilder) { + public TribeService(Settings settings, Path configPath, ClusterService clusterService, final String tribeNodeId, + NamedWriteableRegistry namedWriteableRegistry, BiFunction clientNodeBuilder) { super(settings); this.clusterService = clusterService; this.namedWriteableRegistry = namedWriteableRegistry; @@ -224,12 +228,16 @@ public class TribeService extends AbstractLifecycleComponent { nodesSettings.remove("on_conflict"); // remove prefix settings that don't indicate a client for (Map.Entry entry : nodesSettings.entrySet()) { Settings clientSettings = buildClientSettings(entry.getKey(), tribeNodeId, settings, entry.getValue()); - nodes.add(clientNodeBuilder.apply(clientSettings)); + nodes.add(clientNodeBuilder.apply(clientSettings, configPath)); } this.blockIndicesMetadata = BLOCKS_METADATA_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY); this.blockIndicesRead = BLOCKS_READ_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY); this.blockIndicesWrite = BLOCKS_WRITE_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY); + if (!nodes.isEmpty()) { + new DeprecationLogger(Loggers.getLogger(TribeService.class)) + .deprecated("tribe nodes are deprecated in favor of cross-cluster search and will be removed in Elasticsearch 7.0.0"); + } this.onConflict = ON_CONFLICT_SETTING.get(settings); } @@ -247,9 +255,6 @@ public class TribeService extends AbstractLifecycleComponent { Settings.Builder sb = Settings.builder().put(tribeSettings); sb.put(Node.NODE_NAME_SETTING.getKey(), Node.NODE_NAME_SETTING.get(globalSettings) + "/" + tribeName); sb.put(Environment.PATH_HOME_SETTING.getKey(), Environment.PATH_HOME_SETTING.get(globalSettings)); // pass through ES home dir - if (Environment.PATH_CONF_SETTING.exists(globalSettings)) { - sb.put(Environment.PATH_CONF_SETTING.getKey(), Environment.PATH_CONF_SETTING.get(globalSettings)); - } if (Environment.PATH_LOGS_SETTING.exists(globalSettings)) { sb.put(Environment.PATH_LOGS_SETTING.getKey(), Environment.PATH_LOGS_SETTING.get(globalSettings)); } diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index f6ad88c9572..8496eaa529d 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.1.jar}" { //// Very special jar permissions: //// These are dangerous permissions that we don't want to grant to everything. 
-grant codeBase "${codebase.lucene-core-7.0.0-snapshot-a0aef2f.jar}" { +grant codeBase "${codebase.lucene-core-7.0.0-snapshot-ad2cb77.jar}" { // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die) // java 8 package permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; @@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-core-7.0.0-snapshot-a0aef2f.jar}" { permission java.lang.RuntimePermission "accessDeclaredMembers"; }; -grant codeBase "${codebase.lucene-misc-7.0.0-snapshot-a0aef2f.jar}" { +grant codeBase "${codebase.lucene-misc-7.0.0-snapshot-ad2cb77.jar}" { // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper permission java.nio.file.LinkPermission "hard"; }; diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index 97e14b6994a..6f5d0ac924e 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -33,7 +33,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; -grant codeBase "${codebase.lucene-test-framework-7.0.0-snapshot-a0aef2f.jar}" { +grant codeBase "${codebase.lucene-test-framework-7.0.0-snapshot-ad2cb77.jar}" { // needed by RamUsageTester permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // needed for testing hardlinks in StoreRecoveryTests since we install MockFS @@ -58,7 +58,7 @@ grant codeBase "${codebase.junit-4.12.jar}" { permission java.lang.RuntimePermission "accessDeclaredMembers"; }; -grant codeBase "${codebase.mocksocket-1.1.jar}" { +grant codeBase "${codebase.mocksocket-1.2.jar}" { // mocksocket makes and accepts socket connections permission java.net.SocketPermission "*", "accept,connect"; }; diff --git a/core/src/test/java/org/apache/lucene/queries/SearchAfterSortedDocQueryTests.java b/core/src/test/java/org/apache/lucene/queries/SearchAfterSortedDocQueryTests.java index 25c5ff6fa21..0405849554e 100644 --- a/core/src/test/java/org/apache/lucene/queries/SearchAfterSortedDocQueryTests.java +++ b/core/src/test/java/org/apache/lucene/queries/SearchAfterSortedDocQueryTests.java @@ -98,7 +98,7 @@ public class SearchAfterSortedDocQueryTests extends ESTestCase { } } final IndexReader reader = w.getReader(); - final IndexSearcher searcher = newSearcher(reader); + final IndexSearcher searcher = new IndexSearcher(reader); int step = randomIntBetween(1, 10); FixedBitSet bitSet = new FixedBitSet(numDocs); diff --git a/core/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java b/core/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java index eec611146a6..27544448e0c 100644 --- a/core/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java +++ b/core/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java @@ -75,7 +75,7 @@ public class CustomUnifiedHighlighterTests extends ESTestCase { IndexSearcher searcher = newSearcher(reader); iw.close(); TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 1, Sort.INDEXORDER); - assertThat(topDocs.totalHits, equalTo(1)); + assertThat(topDocs.totalHits, equalTo(1L)); String rawValue = Strings.arrayToDelimitedString(inputs, String.valueOf(MULTIVAL_SEP_CHAR)); CustomUnifiedHighlighter highlighter = new 
CustomUnifiedHighlighter(searcher, analyzer, new CustomPassageFormatter("", "", new DefaultEncoder()), locale, breakIterator, rawValue, diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index d8cd635f33f..9591ec210da 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -352,4 +352,12 @@ public class VersionTests extends ESTestCase { return result; } + // This exists because 5.1.0 was never released due to a mistake in the release process. + // This verifies that we never declare the version as "released" accidentally. + // It would never pass qa tests later on, but those come very far in the build and this is quick to check now. + public void testUnreleasedVersion() { + Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0"); + VersionTests.assertUnknownVersion(VERSION_5_1_0_UNRELEASED); + } + } diff --git a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java index 0f3812c0cd6..6c20e63545f 100644 --- a/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/IndicesRequestIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.action; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; @@ -66,6 +67,7 @@ import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.replication.TransportReplicationActionTests; import org.elasticsearch.action.termvectors.MultiTermVectorsAction; @@ -86,7 +88,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; -import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.script.ScriptType; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESIntegTestCase; @@ -486,7 +487,7 @@ public class IndicesRequestIT extends ESIntegTestCase { public void testDeleteIndex() { interceptTransportActions(DeleteIndexAction.NAME); - String[] randomIndicesOrAliases = randomUniqueIndicesOrAliases(); + String[] randomIndicesOrAliases = randomUniqueIndices(); DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(randomIndicesOrAliases); assertAcked(internalCluster().coordOnlyNodeClient().admin().indices().delete(deleteIndexRequest).actionGet()); @@ -644,12 +645,8 @@ public class IndicesRequestIT extends ESIntegTestCase { } private String[] randomUniqueIndicesOrAliases() { - Set uniqueIndices = new HashSet<>(); - int count = randomIntBetween(1, this.indices.size()); - while (uniqueIndices.size() < count) { - uniqueIndices.add(randomFrom(this.indices)); - } - String[] indices = new String[count]; + String[] uniqueIndices = randomUniqueIndices(); + String[] indices = new String[uniqueIndices.length]; int i = 0; for (String index : uniqueIndices) { indices[i++] = randomBoolean() ? 
index + "-alias" : index; @@ -657,6 +654,15 @@ public class IndicesRequestIT extends ESIntegTestCase { return indices; } + private String[] randomUniqueIndices() { + Set uniqueIndices = new HashSet<>(); + int count = randomIntBetween(1, this.indices.size()); + while (uniqueIndices.size() < count) { + uniqueIndices.add(randomFrom(this.indices)); + } + return uniqueIndices.toArray(new String[uniqueIndices.size()]); + } + private static void assertAllRequestsHaveBeenConsumed() { Iterable pluginsServices = internalCluster().getInstances(PluginsService.class); for (PluginsService pluginsService : pluginsServices) { diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index ec981442b57..f113f49a415 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -386,7 +386,7 @@ public class TestTaskPlugin extends Plugin implements ActionPlugin { private List tasks; public UnblockTestTasksResponse() { - + super(null, null); } public UnblockTestTasksResponse(List tasks, List taskFailures, List tasks; TestTasksResponse() { - + super(null, null); } TestTasksResponse(List tasks, List taskFailures, diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index 36c7da7894b..3c2e10d181b 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -25,24 +25,36 @@ import org.apache.lucene.search.SortedSetSelector; import org.apache.lucene.search.SortedSetSortField; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.segments.IndexSegments; -import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; -import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.Priority; import 
org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.engine.Segment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -50,10 +62,14 @@ import org.elasticsearch.test.VersionUtils; import java.util.Arrays; import java.util.Collection; +import java.util.List; +import java.util.stream.IntStream; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class ShrinkIndexIT extends ESIntegTestCase { @@ -135,6 +151,81 @@ public class ShrinkIndexIT extends ESIntegTestCase { assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); } + public void testShrinkIndexPrimaryTerm() throws Exception { + final List factors = Arrays.asList(2, 3, 5, 7); + final List numberOfShardsFactors = randomSubsetOf(scaledRandomIntBetween(1, factors.size()), factors); + final int numberOfShards = numberOfShardsFactors.stream().reduce(1, (x, y) -> x * y); + final int numberOfTargetShards = randomSubsetOf(numberOfShardsFactors).stream().reduce(1, (x, y) -> x * y); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get(); + + final ImmutableOpenMap dataNodes = + client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertThat(dataNodes.size(), greaterThanOrEqualTo(2)); + final DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); + final String mergeNode = discoveryNodes[0].getName(); + ensureGreen(); + + // fail random primary shards to force primary terms to increase + final Index source = resolveIndex("source"); + final int iterations = scaledRandomIntBetween(0, 16); + for (int i = 0; i < iterations; i++) { + final String node = randomSubsetOf(1, internalCluster().nodesInclude("source")).get(0); + final IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + final IndexService indexShards = indexServices.indexServiceSafe(source); + for (final Integer shardId : indexShards.shardIds()) { + final IndexShard shard = indexShards.getShard(shardId); + if (shard.routingEntry().primary() && randomBoolean()) { + disableAllocation("source"); + shard.failShard("test", new Exception("test")); + // this can not succeed until the shard is failed and a replica is promoted + int id = 0; + while (true) { + // find an ID that routes to the right shard, we will only index to the shard that saw a primary failure + final String s = Integer.toString(id); + final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); + if (hash == shardId) { + final IndexRequest request = + new IndexRequest("source", "type", s).source("{ 
\"f\": \"" + s + "\"}", XContentType.JSON); + client().index(request).get(); + break; + } else { + id++; + } + } + enableAllocation("source"); + ensureGreen(); + } + } + } + + // relocate all shards to one node such that we can merge it. + final Settings.Builder prepareShrinkSettings = + Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true); + client().admin().indices().prepareUpdateSettings("source").setSettings(prepareShrinkSettings).get(); + ensureGreen(); + + final IndexMetaData indexMetaData = indexMetaData(client(), "source"); + final long beforeShrinkPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetaData::primaryTerm).max().getAsLong(); + + // now merge source into target + final Settings shrinkSettings = + Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", numberOfTargetShards).build(); + assertAcked(client().admin().indices().prepareShrinkIndex("source", "target").setSettings(shrinkSettings).get()); + + ensureGreen(); + + final IndexMetaData afterShrinkIndexMetaData = indexMetaData(client(), "target"); + for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { + assertThat(afterShrinkIndexMetaData.primaryTerm(shardId), equalTo(beforeShrinkPrimaryTerm + 1)); + } + } + + private static IndexMetaData indexMetaData(final Client client, final String index) { + final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + return clusterStateResponse.getState().metaData().index(index); + } + public void testCreateShrinkIndex() { internalCluster().ensureAtLeastNumDataNodes(2); Version version = VersionUtils.randomVersion(random()); @@ -142,15 +233,15 @@ public class ShrinkIndexIT extends ESIntegTestCase { .put("number_of_shards", randomIntBetween(2, 7)) .put("index.version.created", version) ).get(); - for (int i = 0; i < 20; i++) { + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { client().prepareIndex("source", "type") .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } - ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() - .getDataNodes(); + ImmutableOpenMap dataNodes = + client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); - String mergeNode = discoveryNodes[0].getName(); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due // to the require._name below. @@ -158,16 +249,53 @@ public class ShrinkIndexIT extends ESIntegTestCase { // relocate all shards to one node such that we can merge it. client().admin().indices().prepareUpdateSettings("source") .setSettings(Settings.builder() - .put("index.routing.allocation.require._name", mergeNode) + .put("index.routing.allocation.require._name", discoveryNodes[0].getName()) .put("index.blocks.write", true)).get(); ensureGreen(); - // now merge source into a single shard index + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. 
balancing can move the target primary + // making it hard to pin point the source shards. + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put( + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none" + )).get(); + + + // now merge source into a single shard index final boolean createWithReplicas = randomBoolean(); assertAcked(client().admin().indices().prepareShrinkIndex("source", "target") .setSettings(Settings.builder().put("index.number_of_replicas", createWithReplicas ? 1 : 0).build()).get()); ensureGreen(); - assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + // resolve true merge node - this is not always the node we required as all shards may be on another node + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); + logger.info("merge node {}", mergeNode); + + final long maxSeqNo = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getSeqNoStats).mapToLong(SeqNoStats::getMaxSeqNo).max().getAsLong(); + final long maxUnsafeAutoIdTimestamp = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getStats) + .map(CommonStats::getSegments) + .mapToLong(SegmentsStats::getMaxUnsafeAutoIdTimestamp) + .max() + .getAsLong(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + for (final ShardStats shardStats : targetStats.getShards()) { + final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); + final ShardRouting shardRouting = shardStats.getShardRouting(); + assertThat("failed on " + shardRouting, seqNoStats.getMaxSeqNo(), equalTo(maxSeqNo)); + assertThat("failed on " + shardRouting, seqNoStats.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat("failed on " + shardRouting, + shardStats.getStats().getSegments().getMaxUnsafeAutoIdTimestamp(), equalTo(maxUnsafeAutoIdTimestamp)); + } + + final int size = docs > 0 ? 
2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); if (createWithReplicas == false) { // bump replicas @@ -175,18 +303,23 @@ public class ShrinkIndexIT extends ESIntegTestCase { .setSettings(Settings.builder() .put("index.number_of_replicas", 1)).get(); ensureGreen(); - assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); } - for (int i = 20; i < 40; i++) { + for (int i = docs; i < 2 * docs; i++) { client().prepareIndex("target", "type") .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } flushAndRefresh(); - assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 40); - assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 2 * docs); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + + // clean up + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put( + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String)null + )).get(); } /** * Tests that we can manually recover from a failed allocation due to shards being moved away etc. 
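The shrink tests above lean on two properties worth calling out: the shrink API requires the target shard count to evenly divide the source shard count, which is why testShrinkIndexPrimaryTerm builds both counts from prime factors, and after the shrink the target's primary terms are expected to be one above the highest primary term seen on the source shards. A minimal standalone sketch of the shard-count construction, with the factor choices fixed here for illustration (the test draws them randomly):

import java.util.Arrays;
import java.util.List;

// Sketch only: mirrors how testShrinkIndexPrimaryTerm derives compatible shard counts.
class ShrinkShardCountSketch {
    static int product(List<Integer> factors) {
        return factors.stream().reduce(1, (x, y) -> x * y);
    }

    public static void main(String[] args) {
        List<Integer> sourceFactors = Arrays.asList(2, 5); // drawn randomly from [2, 3, 5, 7] in the test
        List<Integer> targetFactors = Arrays.asList(5);    // a random subset of the source factors
        int numberOfShards = product(sourceFactors);       // 10
        int numberOfTargetShards = product(targetFactors); // 5
        // Because the target factors are a subset of the source factors, the target count always
        // divides the source count, which is what the shrink request requires.
        System.out.println(numberOfShards % numberOfTargetShards == 0); // prints: true
    }
}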
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java index 22377ea1769..8fcc76e018a 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java @@ -34,13 +34,10 @@ public class BulkIntegrationIT extends ESIntegTestCase { BulkRequestBuilder bulkBuilder = client().prepareBulk(); bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null, XContentType.JSON); bulkBuilder.get(); - assertBusy(new Runnable() { - @Override - public void run() { - GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings().get(); - assertTrue(mappingsResponse.getMappings().containsKey("logstash-2014.03.30")); - assertTrue(mappingsResponse.getMappings().get("logstash-2014.03.30").containsKey("logs")); - } + assertBusy(() -> { + GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings().get(); + assertTrue(mappingsResponse.getMappings().containsKey("logstash-2014.03.30")); + assertTrue(mappingsResponse.getMappings().get("logstash-2014.03.30").containsKey("logs")); }); } } diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java index 24f3afdd795..4a55f0c8b95 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.action.delete.DeleteResponseTests; import org.elasticsearch.action.index.IndexResponseTests; +import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.action.update.UpdateResponseTests; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -137,11 +138,12 @@ public class BulkItemResponseTests extends ESTestCase { assertDeepEquals((ElasticsearchException) expectedFailure.getCause(), (ElasticsearchException) actualFailure.getCause()); } else { + DocWriteResponse expectedDocResponse = expected.getResponse(); + DocWriteResponse actualDocResponse = expected.getResponse(); + + IndexResponseTests.assertDocWriteResponse(expectedDocResponse, actualDocResponse); if (expected.getOpType() == DocWriteRequest.OpType.UPDATE) { - UpdateResponseTests.assertUpdateResponse(expected.getResponse(), actual.getResponse()); - } else { - // assertDocWriteResponse check the result for INDEX/CREATE and DELETE operations - IndexResponseTests.assertDocWriteResponse(expected.getResponse(), actual.getResponse()); + assertEquals(((UpdateResponse) expectedDocResponse).getGetResult(), ((UpdateResponse)actualDocResponse).getGetResult()); } } } diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index cf41042ab8c..615ed7db5db 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.bulk; +import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.alias.Alias; import 
org.elasticsearch.action.delete.DeleteRequest; @@ -42,6 +43,7 @@ import org.elasticsearch.script.ScriptException; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -53,6 +55,8 @@ import static org.elasticsearch.action.DocWriteRequest.OpType; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.InternalSettingsPlugin; + import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; @@ -66,7 +70,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singleton(CustomScriptPlugin.class); + return Arrays.asList(InternalSettingsPlugin.class, CustomScriptPlugin.class); } public static class CustomScriptPlugin extends MockScriptPlugin { @@ -457,7 +461,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { */ public void testBulkUpdateChildMissingParentRouting() throws Exception { assertAcked(prepareCreate("test") - .setSettings("index.mapping.single_type", false) + .setSettings("index.version.created", Version.V_5_6_0.id) // allows for multiple types .addMapping("parent", "{\"parent\":{}}", XContentType.JSON) .addMapping("child", "{\"child\": {\"_parent\": {\"type\": \"parent\"}}}", XContentType.JSON)); ensureGreen(); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index e6e18fb567d..32dfbe85d42 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -121,7 +121,8 @@ public class TransportBulkActionIngestTests extends ESTestCase { class TestSingleItemBulkWriteAction extends TransportSingleItemBulkWriteAction { TestSingleItemBulkWriteAction(TestTransportBulkAction bulkAction) { - super(Settings.EMPTY, IndexAction.NAME, transportService, TransportBulkActionIngestTests.this.clusterService, + super(Settings.EMPTY, IndexAction.NAME, TransportBulkActionIngestTests.this.transportService, + TransportBulkActionIngestTests.this.clusterService, null, null, null, new ActionFilters(Collections.emptySet()), null, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX, bulkAction, null); } diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index aa7f613a176..ec437067442 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -35,8 +35,12 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.lucene.uid.Versions; import 
org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -47,15 +51,19 @@ import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.rest.RestStatus; -import org.mockito.ArgumentCaptor; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.LongSupplier; import static org.elasticsearch.action.bulk.TransportShardBulkAction.replicaItemExecutionMode; +import static org.junit.Assert.assertNotNull; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyLong; @@ -213,13 +221,13 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { Translog.Location location = new Translog.Location(0, 0, 0); UpdateHelper updateHelper = null; - // Pretend the mappings haven't made it to the node yet, and throw a rejection - Exception err = new ReplicationOperation.RetryOnPrimaryException(shardId, "rejection"); + // Pretend the mappings haven't made it to the node yet, and throw a rejection + RuntimeException err = new ReplicationOperation.RetryOnPrimaryException(shardId, "rejection"); try { TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, location, 0, updateHelper, threadPool::absoluteTimeInMillis, - new ThrowingMappingUpdatePerformer(err)); + new ThrowingVerifyingMappingUpdatePerformer(err)); fail("should have thrown a retry exception"); } catch (ReplicationOperation.RetryOnPrimaryException e) { assertThat(e, equalTo(err)); @@ -243,7 +251,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { UpdateHelper updateHelper = null; // Return a mapping conflict (IAE) when trying to update the mapping - Exception err = new IllegalArgumentException("mapping conflict"); + RuntimeException err = new IllegalArgumentException("mapping conflict"); Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, location, 0, updateHelper, @@ -528,6 +536,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar") ); final String failureMessage = "simulated primary failure"; + final IOException exception = new IOException(failureMessage); itemRequest.setPrimaryResponse(new BulkItemResponse(0, randomFrom( DocWriteRequest.OpType.CREATE, @@ -535,7 +544,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { DocWriteRequest.OpType.INDEX ), new BulkItemResponse.Failure("index", "type", "1", - new IOException(failureMessage), 1L) + exception, 1L) )); BulkItemRequest[] itemRequests = new BulkItemRequest[1]; itemRequests[0] = itemRequest; @@ -543,12 +552,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { shard.shardId(), RefreshPolicy.NONE, itemRequests); bulkShardRequest.primaryTerm(randomIntBetween(1, (int) shard.getPrimaryTerm())); TransportShardBulkAction.performOnReplica(bulkShardRequest, shard); - ArgumentCaptor noOp = 
ArgumentCaptor.forClass(Engine.NoOp.class); - verify(shard, times(1)).markSeqNoAsNoOp(noOp.capture()); - final Engine.NoOp noOpValue = noOp.getValue(); - assertThat(noOpValue.seqNo(), equalTo(1L)); - assertThat(noOpValue.primaryTerm(), equalTo(bulkShardRequest.primaryTerm())); - assertThat(noOpValue.reason(), containsString(failureMessage)); + verify(shard, times(1)).markSeqNoAsNoop(1, bulkShardRequest.primaryTerm(), exception.toString()); closeShards(shard); } @@ -565,16 +569,14 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { TransportShardBulkAction.executeIndexRequestOnPrimary(request, shard, new MappingUpdatePerformer() { @Override - public void updateMappings(Mapping update, ShardId shardId, - String type) throws Exception { + public void updateMappings(Mapping update, ShardId shardId, String type) { // There should indeed be a mapping update assertNotNull(update); updateCalled.incrementAndGet(); } @Override - public void verifyMappings(Mapping update, - ShardId shardId) throws Exception { + public void verifyMappings(Mapping update, ShardId shardId) { // No-op, will be called logger.info("--> verifying mappings noop"); verifyCalled.incrementAndGet(); @@ -584,9 +586,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { assertThat("mappings were \"updated\" once", updateCalled.get(), equalTo(1)); assertThat("mappings were \"verified\" once", verifyCalled.get(), equalTo(1)); - // Verify that the shard "prepared" the operation twice - verify(shard, times(2)).prepareIndexOnPrimary(any(), anyLong(), any(), - anyLong(), anyBoolean()); + // Verify that the shard "executed" the operation twice + verify(shard, times(2)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyBoolean(), any()); // Update the mapping, so the next mapping updater doesn't do anything final MapperService mapperService = shard.mapperService(); @@ -596,22 +597,19 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { TransportShardBulkAction.executeIndexRequestOnPrimary(request, shard, new MappingUpdatePerformer() { @Override - public void updateMappings(Mapping update, ShardId shardId, - String type) throws Exception { + public void updateMappings(Mapping update, ShardId shardId, String type) { fail("should not have had to update the mappings"); } @Override - public void verifyMappings(Mapping update, - ShardId shardId) throws Exception { + public void verifyMappings(Mapping update, ShardId shardId) { fail("should not have had to update the mappings"); } }); - // Verify that the shard "prepared" the operation only once (2 for previous invocations plus + // Verify that the shard "executed" the operation only once (2 for previous invocations plus // 1 for this execution) - verify(shard, times(3)).prepareIndexOnPrimary(any(), anyLong(), any(), - anyLong(), anyBoolean()); + verify(shard, times(3)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyBoolean(), any()); closeShards(shard); } @@ -629,25 +627,199 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { } } - public void testPrepareIndexOpOnReplica() throws Exception { + public void testProcessUpdateResponse() throws Exception { IndexMetaData metaData = indexMetaData(); IndexShard shard = newStartedShard(false); - DocWriteResponse primaryResponse = new IndexResponse(shardId, "index", "id", 17, 0, 1, randomBoolean()); - IndexRequest request = new IndexRequest("index", "type", "id") - .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + 
UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + BulkItemRequest request = new BulkItemRequest(0, updateRequest); + Exception err = new VersionConflictEngineException(shardId, "type", "id", + "I'm conflicted <(;_;)>"); + Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0); + Engine.DeleteResult deleteResult = new Engine.DeleteResult(1, 1, true); + DocWriteResponse.Result docWriteResult = DocWriteResponse.Result.CREATED; + DocWriteResponse.Result deleteWriteResult = DocWriteResponse.Result.DELETED; + IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + DeleteRequest deleteRequest = new DeleteRequest("index", "type", "id"); + UpdateHelper.Result translate = new UpdateHelper.Result(indexRequest, docWriteResult, + new HashMap(), XContentType.JSON); + UpdateHelper.Result translateDelete = new UpdateHelper.Result(deleteRequest, deleteWriteResult, + new HashMap(), XContentType.JSON); - Engine.Index op = TransportShardBulkAction.prepareIndexOperationOnReplica( - primaryResponse, request, shard.getPrimaryTerm(), shard); + BulkItemRequest[] itemRequests = new BulkItemRequest[1]; + itemRequests[0] = request; + BulkShardRequest bulkShardRequest = new BulkShardRequest(shard.shardId(), RefreshPolicy.NONE, itemRequests); - assertThat(op.version(), equalTo(primaryResponse.getVersion())); - assertThat(op.seqNo(), equalTo(primaryResponse.getSeqNo())); - assertThat(op.versionType(), equalTo(VersionType.EXTERNAL)); - assertThat(op.primaryTerm(), equalTo(shard.getPrimaryTerm())); + BulkItemResultHolder holder = TransportShardBulkAction.processUpdateResponse(updateRequest, + "index", indexResult, translate, shard, 7); + + assertTrue(holder.isVersionConflict()); + assertThat(holder.response, instanceOf(UpdateResponse.class)); + UpdateResponse updateResp = (UpdateResponse) holder.response; + assertThat(updateResp.getGetResult(), equalTo(null)); + assertThat(holder.operationResult, equalTo(indexResult)); + BulkItemRequest replicaBulkRequest = holder.replicaRequest; + assertThat(replicaBulkRequest.id(), equalTo(7)); + DocWriteRequest replicaRequest = replicaBulkRequest.request(); + assertThat(replicaRequest, instanceOf(IndexRequest.class)); + assertThat(replicaRequest, equalTo(indexRequest)); + + BulkItemResultHolder deleteHolder = TransportShardBulkAction.processUpdateResponse(updateRequest, + "index", deleteResult, translateDelete, shard, 8); + + assertFalse(deleteHolder.isVersionConflict()); + assertThat(deleteHolder.response, instanceOf(UpdateResponse.class)); + UpdateResponse delUpdateResp = (UpdateResponse) deleteHolder.response; + assertThat(delUpdateResp.getGetResult(), equalTo(null)); + assertThat(deleteHolder.operationResult, equalTo(deleteResult)); + BulkItemRequest delReplicaBulkRequest = deleteHolder.replicaRequest; + assertThat(delReplicaBulkRequest.id(), equalTo(8)); + DocWriteRequest delReplicaRequest = delReplicaBulkRequest.request(); + assertThat(delReplicaRequest, instanceOf(DeleteRequest.class)); + assertThat(delReplicaRequest, equalTo(deleteRequest)); closeShards(shard); } + public void testExecuteUpdateRequestOnce() throws Exception { + IndexMetaData metaData = indexMetaData(); + IndexShard shard = newStartedShard(true); + + Map source = new HashMap<>(); + source.put("foo", "bar"); + BulkItemRequest[] items = new BulkItemRequest[1]; + boolean create = randomBoolean(); + DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar") + .create(create); + BulkItemRequest 
primaryRequest = new BulkItemRequest(0, writeRequest); + items[0] = primaryRequest; + BulkShardRequest bulkShardRequest = + new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + + Translog.Location location = new Translog.Location(0, 0, 0); + IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + indexRequest.source(source); + + DocWriteResponse.Result docWriteResult = DocWriteResponse.Result.CREATED; + UpdateHelper.Result translate = new UpdateHelper.Result(indexRequest, docWriteResult, + new HashMap(), XContentType.JSON); + UpdateHelper updateHelper = new MockUpdateHelper(translate); + UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + updateRequest.upsert(source); + + BulkItemResultHolder holder = TransportShardBulkAction.executeUpdateRequestOnce(updateRequest, shard, metaData, + "index", updateHelper, threadPool::absoluteTimeInMillis, primaryRequest, 0, new NoopMappingUpdatePerformer()); + + assertFalse(holder.isVersionConflict()); + assertNotNull(holder.response); + assertNotNull(holder.operationResult); + assertNotNull(holder.replicaRequest); + + assertThat(holder.response, instanceOf(UpdateResponse.class)); + UpdateResponse updateResp = (UpdateResponse) holder.response; + assertThat(updateResp.getGetResult(), equalTo(null)); + BulkItemRequest replicaBulkRequest = holder.replicaRequest; + assertThat(replicaBulkRequest.id(), equalTo(0)); + DocWriteRequest replicaRequest = replicaBulkRequest.request(); + assertThat(replicaRequest, instanceOf(IndexRequest.class)); + assertThat(replicaRequest, equalTo(indexRequest)); + + // Assert that the document actually made it there + assertDocCount(shard, 1); + closeShards(shard); + } + + public void testExecuteUpdateRequestOnceWithFailure() throws Exception { + IndexMetaData metaData = indexMetaData(); + IndexShard shard = newStartedShard(true); + + Map source = new HashMap<>(); + source.put("foo", "bar"); + BulkItemRequest[] items = new BulkItemRequest[1]; + boolean create = randomBoolean(); + DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar") + .create(create); + BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); + items[0] = primaryRequest; + BulkShardRequest bulkShardRequest = + new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + + Translog.Location location = new Translog.Location(0, 0, 0); + IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + indexRequest.source(source); + + DocWriteResponse.Result docWriteResult = DocWriteResponse.Result.CREATED; + Exception prepareFailure = new IllegalArgumentException("I failed to do something!"); + UpdateHelper updateHelper = new FailingUpdateHelper(prepareFailure); + UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + updateRequest.upsert(source); + + BulkItemResultHolder holder = TransportShardBulkAction.executeUpdateRequestOnce(updateRequest, shard, metaData, + "index", updateHelper, threadPool::absoluteTimeInMillis, primaryRequest, 0, new NoopMappingUpdatePerformer()); + + assertFalse(holder.isVersionConflict()); + assertNull(holder.response); + assertNotNull(holder.operationResult); + assertNotNull(holder.replicaRequest); + + Engine.IndexResult opResult = (Engine.IndexResult) holder.operationResult; + assertTrue(opResult.hasFailure()); + assertFalse(opResult.isCreated()); + Exception e = opResult.getFailure(); + assertThat(e.getMessage(), containsString("I failed to do something!")); + + BulkItemRequest 
replicaBulkRequest = holder.replicaRequest; + assertThat(replicaBulkRequest.id(), equalTo(0)); + assertThat(replicaBulkRequest.request(), instanceOf(IndexRequest.class)); + IndexRequest replicaRequest = (IndexRequest) replicaBulkRequest.request(); + assertThat(replicaRequest.index(), equalTo("index")); + assertThat(replicaRequest.type(), equalTo("type")); + assertThat(replicaRequest.id(), equalTo("id")); + assertThat(replicaRequest.sourceAsMap(), equalTo(source)); + + // Assert that the document did not make it there, since it should have failed + assertDocCount(shard, 0); + closeShards(shard); + } + + /** + * Fake UpdateHelper that always returns whatever result you give it + */ + private static class MockUpdateHelper extends UpdateHelper { + private final UpdateHelper.Result result; + + MockUpdateHelper(UpdateHelper.Result result) { + super(Settings.EMPTY, null); + this.result = result; + } + + @Override + public UpdateHelper.Result prepare(UpdateRequest u, IndexShard s, LongSupplier n) { + logger.info("--> preparing update for {} - {}", s, u); + return result; + } + } + + /** + * An update helper that always fails to prepare the update + */ + private static class FailingUpdateHelper extends UpdateHelper { + private final Exception e; + + FailingUpdateHelper(Exception failure) { + super(Settings.EMPTY, null); + this.e = failure; + } + + @Override + public UpdateHelper.Result prepare(UpdateRequest u, IndexShard s, LongSupplier n) { + logger.info("--> preparing failing update for {} - {}", s, u); + throw new ElasticsearchException(e); + } + } + /** * Fake IndexResult that has a settable translog location */ @@ -668,40 +840,40 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { /** Doesn't perform any mapping updates */ public static class NoopMappingUpdatePerformer implements MappingUpdatePerformer { - public void updateMappings(Mapping update, ShardId shardId, String type) throws Exception { + public void updateMappings(Mapping update, ShardId shardId, String type) { } - public void verifyMappings(Mapping update, ShardId shardId) throws Exception { + public void verifyMappings(Mapping update, ShardId shardId) { } } /** Always throw the given exception */ private class ThrowingMappingUpdatePerformer implements MappingUpdatePerformer { - private final Exception e; - ThrowingMappingUpdatePerformer(Exception e) { + private final RuntimeException e; + ThrowingMappingUpdatePerformer(RuntimeException e) { this.e = e; } - public void updateMappings(Mapping update, ShardId shardId, String type) throws Exception { + public void updateMappings(Mapping update, ShardId shardId, String type) { throw e; } - public void verifyMappings(Mapping update, ShardId shardId) throws Exception { + public void verifyMappings(Mapping update, ShardId shardId) { fail("should not have gotten to this point"); } } /** Always throw the given exception */ private class ThrowingVerifyingMappingUpdatePerformer implements MappingUpdatePerformer { - private final Exception e; - ThrowingVerifyingMappingUpdatePerformer(Exception e) { + private final RuntimeException e; + ThrowingVerifyingMappingUpdatePerformer(RuntimeException e) { this.e = e; } - public void updateMappings(Mapping update, ShardId shardId, String type) throws Exception { + public void updateMappings(Mapping update, ShardId shardId, String type) { } - public void verifyMappings(Mapping update, ShardId shardId) throws Exception { + public void verifyMappings(Mapping update, ShardId shardId) { throw e; } } diff --git 
a/core/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java b/core/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java index 95fbbe8ed14..b90ac66b420 100644 --- a/core/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java @@ -32,9 +32,11 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; import java.io.IOException; +import java.util.function.Predicate; import static org.elasticsearch.action.index.IndexResponseTests.assertDocWriteResponse; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VALUE; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; public class DeleteResponseTests extends ESTestCase { @@ -56,16 +58,40 @@ public class DeleteResponseTests extends ESTestCase { } public void testToAndFromXContent() throws IOException { + doFromXContentTestWithRandomFields(false); + } + + /** + * This test adds random fields and objects to the xContent rendered out to + * ensure we can parse it back to be forward compatible with additions to + * the xContent + */ + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(true); + } + + private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { final Tuple tuple = randomDeleteResponse(); DeleteResponse deleteResponse = tuple.v1(); DeleteResponse expectedDeleteResponse = tuple.v2(); boolean humanReadable = randomBoolean(); final XContentType xContentType = randomFrom(XContentType.values()); - BytesReference deleteResponseBytes = toShuffledXContent(deleteResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference originalBytes = toShuffledXContent(deleteResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference mutated; + if (addRandomFields) { + // The ShardInfo.Failure's exception is rendered out in a "reason" object. We shouldn't add anything random there + // because exception rendering and parsing are very permissive: any extra object or field would be rendered as + // a exception custom metadata and be parsed back as a custom header, making it impossible to compare the results + // in this test. 
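+            // The comment above describes the same forward-compatibility pattern that the index, get and
+            // update response tests below also use. A condensed sketch of the whole round trip (FooResponse
+            // is a hypothetical stand-in for the concrete response class under test; the helpers are the
+            // ESTestCase/XContentTestUtils utilities already imported in these tests):
+            //
+            //   FooResponse response = randomFooResponse();                               // hypothetical factory
+            //   XContentType xContentType = randomFrom(XContentType.values());
+            //   BytesReference original = toShuffledXContent(response, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
+            //   Predicate<String> excludeFilter = path -> path.contains("reason");        // keep clear of permissive exception parsing
+            //   BytesReference mutated = insertRandomFields(xContentType, original, excludeFilter, random());
+            //   try (XContentParser parser = createParser(xContentType.xContent(), mutated)) {
+            //       FooResponse parsed = FooResponse.fromXContent(parser);                // unknown fields must be skipped
+            //       assertNull(parser.nextToken());
+            //   }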
+ Predicate excludeFilter = path -> path.contains("reason"); + mutated = insertRandomFields(xContentType, originalBytes, excludeFilter, random()); + } else { + mutated = originalBytes; + } DeleteResponse parsedDeleteResponse; - try (XContentParser parser = createParser(xContentType.xContent(), deleteResponseBytes)) { + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { parsedDeleteResponse = DeleteResponse.fromXContent(parser); assertNull(parser.nextToken()); } diff --git a/core/src/test/java/org/elasticsearch/action/get/GetResponseTests.java b/core/src/test/java/org/elasticsearch/action/get/GetResponseTests.java index f755d05fc7f..d607a473b9a 100644 --- a/core/src/test/java/org/elasticsearch/action/get/GetResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/get/GetResponseTests.java @@ -19,38 +19,65 @@ package org.elasticsearch.action.get; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.get.GetField; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.util.Collections; +import java.util.function.Predicate; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; import static org.elasticsearch.index.get.GetResultTests.copyGetResult; import static org.elasticsearch.index.get.GetResultTests.mutateGetResult; import static org.elasticsearch.index.get.GetResultTests.randomGetResult; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; public class GetResponseTests extends ESTestCase { public void testToAndFromXContent() throws Exception { + doFromXContentTestWithRandomFields(false); + } + + /** + * This test adds random fields and objects to the xContent rendered out to + * ensure we can parse it back to be forward compatible with additions to + * the xContent + */ + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(true); + } + + private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { XContentType xContentType = randomFrom(XContentType.values()); Tuple tuple = randomGetResult(xContentType); GetResponse getResponse = new GetResponse(tuple.v1()); GetResponse expectedGetResponse = new GetResponse(tuple.v2()); boolean humanReadable = randomBoolean(); BytesReference originalBytes = toShuffledXContent(getResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable, "_source"); - //test that we can parse what we print out + + BytesReference mutated; + if (addRandomFields) { + // "_source" and "fields" just consists of key/value pairs, we shouldn't add anything random there. It is already + // randomized in the randomGetResult() method anyway. Also, we cannot add anything in the root object since this is + // where GetResult's metadata fields are rendered out while // other fields are rendered out in a "fields" object. 
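+            // For illustration of the intent behind the exclude filter that follows (the exact path strings
+            // that insertRandomFields hands to the predicate are an assumption here):
+            //   ""               -> excluded: the root object is where GetResult's metadata fields live
+            //   "fields.field1"  -> excluded: plain key/value content, already randomized by randomGetResult()
+            //   "_source.field2" -> excluded: same reasoning as "fields"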
+ Predicate excludeFilter = (s) -> s.isEmpty() || s.contains("fields") || s.contains("_source"); + mutated = insertRandomFields(xContentType, originalBytes, excludeFilter, random()); + } else { + mutated = originalBytes; + } GetResponse parsedGetResponse; - try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { parsedGetResponse = GetResponse.fromXContent(parser); assertNull(parser.nextToken()); } @@ -65,7 +92,7 @@ public class GetResponseTests extends ESTestCase { public void testToXContent() { { GetResponse getResponse = new GetResponse(new GetResult("index", "type", "id", 1, true, new BytesArray("{ \"field1\" : " + - "\"value1\", \"field2\":\"value2\"}"), Collections.singletonMap("field1", new GetField("field1", + "\"value1\", \"field2\":\"value2\"}"), Collections.singletonMap("field1", new DocumentField("field1", Collections.singletonList("value1"))))); String output = Strings.toString(getResponse); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"found\":true,\"_source\":{ \"field1\" " + @@ -81,7 +108,7 @@ public class GetResponseTests extends ESTestCase { public void testToString() { GetResponse getResponse = new GetResponse( new GetResult("index", "type", "id", 1, true, new BytesArray("{ \"field1\" : " + "\"value1\", \"field2\":\"value2\"}"), - Collections.singletonMap("field1", new GetField("field1", Collections.singletonList("value1"))))); + Collections.singletonMap("field1", new DocumentField("field1", Collections.singletonList("value1"))))); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"found\":true,\"_source\":{ \"field1\" " + ": \"value1\", \"field2\":\"value2\"},\"fields\":{\"field1\":[\"value1\"]}}", getResponse.toString()); } @@ -90,6 +117,19 @@ public class GetResponseTests extends ESTestCase { checkEqualsAndHashCode(new GetResponse(randomGetResult(XContentType.JSON).v1()), GetResponseTests::copyGetResponse, GetResponseTests::mutateGetResponse); } + + public void testFromXContentThrowsParsingException() throws IOException { + GetResponse getResponse = new GetResponse(new GetResult(null, null, null, randomIntBetween(1, 5), randomBoolean(), null, null)); + + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference originalBytes = toShuffledXContent(getResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + + try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + ParsingException exception = expectThrows(ParsingException.class, () -> GetResponse.fromXContent(parser)); + assertEquals("Missing required fields [_index,_type,_id]", exception.getMessage()); + } + } + private static GetResponse copyGetResponse(GetResponse getResponse) { return new GetResponse(copyGetResult(getResponse.getResult)); } diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java index 58947a7173e..feeded03f88 100644 --- a/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java @@ -33,9 +33,11 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; import java.io.IOException; +import java.util.function.Predicate; import static org.elasticsearch.action.support.replication.ReplicationResponseTests.assertShardInfo; import 
static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VALUE; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; public class IndexResponseTests extends ESTestCase { @@ -57,16 +59,40 @@ public class IndexResponseTests extends ESTestCase { } public void testToAndFromXContent() throws IOException { + doFromXContentTestWithRandomFields(false); + } + + /** + * This test adds random fields and objects to the xContent rendered out to + * ensure we can parse it back to be forward compatible with additions to + * the xContent + */ + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(true); + } + + private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { final Tuple tuple = randomIndexResponse(); IndexResponse indexResponse = tuple.v1(); IndexResponse expectedIndexResponse = tuple.v2(); boolean humanReadable = randomBoolean(); XContentType xContentType = randomFrom(XContentType.values()); - BytesReference indexResponseBytes = toShuffledXContent(indexResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference originalBytes = toShuffledXContent(indexResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference mutated; + if (addRandomFields) { + // The ShardInfo.Failure's exception is rendered out in a "reason" object. We shouldn't add anything random there + // because exception rendering and parsing are very permissive: any extra object or field would be rendered as + // a exception custom metadata and be parsed back as a custom header, making it impossible to compare the results + // in this test. + Predicate excludeFilter = path -> path.contains("reason"); + mutated = insertRandomFields(xContentType, originalBytes, excludeFilter, random()); + } else { + mutated = originalBytes; + } IndexResponse parsedIndexResponse; - try (XContentParser parser = createParser(xContentType.xContent(), indexResponseBytes)) { + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { parsedIndexResponse = IndexResponse.fromXContent(parser); assertNull(parser.nextToken()); } diff --git a/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index fef4cff6a4e..b27c7ec3955 100644 --- a/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -27,7 +28,6 @@ import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseBuilder; @@ -105,7 +105,7 @@ public class ExpandSearchPhaseTests extends ESTestCase { }; SearchHits hits = new SearchHits(new SearchHit[]{new SearchHit(1, "ID", new Text("type"), - Collections.singletonMap("someField", new SearchHitField("someField", 
Collections.singletonList(collapseValue))))}, + Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(collapseValue))))}, 1, 1.0F); InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); AtomicReference reference = new AtomicReference<>(); @@ -160,9 +160,9 @@ public class ExpandSearchPhaseTests extends ESTestCase { }; SearchHits hits = new SearchHits(new SearchHit[]{new SearchHit(1, "ID", new Text("type"), - Collections.singletonMap("someField", new SearchHitField("someField", Collections.singletonList(collapseValue)))), + Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(collapseValue)))), new SearchHit(2, "ID2", new Text("type"), - Collections.singletonMap("someField", new SearchHitField("someField", Collections.singletonList(collapseValue))))}, 1, + Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(collapseValue))))}, 1, 1.0F); InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); AtomicReference reference = new AtomicReference<>(); @@ -194,9 +194,9 @@ public class ExpandSearchPhaseTests extends ESTestCase { }; SearchHits hits = new SearchHits(new SearchHit[]{new SearchHit(1, "ID", new Text("type"), - Collections.singletonMap("someField", new SearchHitField("someField", Collections.singletonList(null)))), + Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(null)))), new SearchHit(2, "ID2", new Text("type"), - Collections.singletonMap("someField", new SearchHitField("someField", Collections.singletonList(null))))}, 1, 1.0F); + Collections.singletonMap("someField", new DocumentField("someField", Collections.singletonList(null))))}, 1, 1.0F); InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); AtomicReference reference = new AtomicReference<>(); ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, r -> diff --git a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index b35aac5f958..7a18ca4cff1 100644 --- a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import java.util.Arrays; @@ -64,6 +65,7 @@ public class IndexingMasterFailoverIT extends ESIntegTestCase { * If the master node is being disrupted or if it cannot commit cluster state changes, it needs to retry within timeout limits. * This retry logic is implemented in TransportMasterNodeAction and tested by the following master failover scenario. 
*/ + @TestLogging("_root:DEBUG") public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwable { logger.info("--> start 4 nodes, 3 master, 1 data"); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index f91fab381d3..a4a34b7002c 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -1161,7 +1161,7 @@ public class TransportReplicationActionTests extends ESTestCase { }).when(indexShard).acquirePrimaryOperationPermit(any(ActionListener.class), anyString()); doAnswer(invocation -> { long term = (Long)invocation.getArguments()[0]; - ActionListener callback = (ActionListener) invocation.getArguments()[1]; + ActionListener callback = (ActionListener) invocation.getArguments()[2]; final long primaryTerm = indexShard.getPrimaryTerm(); if (term < primaryTerm) { throw new IllegalArgumentException(String.format(Locale.ROOT, "%s operation term [%d] is too old (current [%d])", @@ -1170,7 +1170,7 @@ public class TransportReplicationActionTests extends ESTestCase { count.incrementAndGet(); callback.onResponse(count::decrementAndGet); return null; - }).when(indexShard).acquireReplicaOperationPermit(anyLong(), any(ActionListener.class), anyString()); + }).when(indexShard).acquireReplicaOperationPermit(anyLong(), anyLong(), any(ActionListener.class), anyString()); when(indexShard.routingEntry()).thenAnswer(invocationOnMock -> { final ClusterState state = clusterService.state(); final RoutingNode node = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index f0690ad67b5..7e1ff9e1ca0 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -456,7 +456,7 @@ public class TransportWriteActionTests extends ESTestCase { count.incrementAndGet(); callback.onResponse(count::decrementAndGet); return null; - }).when(indexShard).acquireReplicaOperationPermit(anyLong(), any(ActionListener.class), anyString()); + }).when(indexShard).acquireReplicaOperationPermit(anyLong(), anyLong(), any(ActionListener.class), anyString()); when(indexShard.routingEntry()).thenAnswer(invocationOnMock -> { final ClusterState state = clusterService.state(); final RoutingNode node = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); diff --git a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index ba488cecb38..29235329d66 100644 --- a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -275,12 +275,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase { transport.handleLocalError(requestId, new 
ConnectTransportException(node, "test exception")); // wait until the timeout was triggered and we actually tried to send for the second time - assertBusy(new Runnable() { - @Override - public void run() { - assertThat(transport.capturedRequests().length, equalTo(1)); - } - }); + assertBusy(() -> assertThat(transport.capturedRequests().length, equalTo(1))); // let it fail the second time too requestId = transport.capturedRequests()[0].requestId; diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index 8b389d69d38..7049d0fa9e9 100644 --- a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; @@ -37,7 +38,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.env.Environment; import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.get.GetField; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.MockScriptEngine; @@ -75,10 +75,8 @@ public class UpdateRequestTests extends ESTestCase { @Before public void setUp() throws Exception { super.setUp(); - final Path genericConfigFolder = createTempDir(); final Settings baseSettings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(Environment.PATH_CONF_SETTING.getKey(), genericConfigFolder) .build(); final Map, Object>> scripts = new HashMap<>(); scripts.put( @@ -534,9 +532,9 @@ public class UpdateRequestTests extends ESTestCase { assertNull(UpdateHelper.calculateRouting(getResult, indexRequest)); assertNull(UpdateHelper.calculateParent(getResult, indexRequest)); - Map fields = new HashMap<>(); - fields.put("_parent", new GetField("_parent", Collections.singletonList("parent1"))); - fields.put("_routing", new GetField("_routing", Collections.singletonList("routing1"))); + Map fields = new HashMap<>(); + fields.put("_parent", new DocumentField("_parent", Collections.singletonList("parent1"))); + fields.put("_routing", new DocumentField("_routing", Collections.singletonList("routing1"))); // Doc exists and has the parent and routing fields getResult = new GetResult("test", "type", "1", 0, true, null, fields); diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java index 1c80ddca1c5..c8d63f73732 100644 --- a/core/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java @@ -26,10 +26,10 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.document.DocumentField; import 
org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.get.GetField; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.get.GetResultTests; import org.elasticsearch.index.shard.ShardId; @@ -40,11 +40,15 @@ import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.function.Predicate; import static org.elasticsearch.action.DocWriteResponse.Result.DELETED; import static org.elasticsearch.action.DocWriteResponse.Result.NOT_FOUND; import static org.elasticsearch.action.DocWriteResponse.Result.UPDATED; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VALUE; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; public class UpdateResponseTests extends ESTestCase { @@ -64,9 +68,9 @@ public class UpdateResponseTests extends ESTestCase { } { BytesReference source = new BytesArray("{\"title\":\"Book title\",\"isbn\":\"ABC-123\"}"); - Map fields = new HashMap<>(); - fields.put("title", new GetField("title", Collections.singletonList("Book title"))); - fields.put("isbn", new GetField("isbn", Collections.singletonList("ABC-123"))); + Map fields = new HashMap<>(); + fields.put("title", new DocumentField("title", Collections.singletonList("Book title"))); + fields.put("isbn", new DocumentField("isbn", Collections.singletonList("ABC-123"))); UpdateResponse updateResponse = new UpdateResponse(new ReplicationResponse.ShardInfo(3, 2), new ShardId("books", "books_uuid", 2), "book", "1", 7, 17, 2, UPDATED); @@ -81,29 +85,59 @@ public class UpdateResponseTests extends ESTestCase { } public void testToAndFromXContent() throws IOException { - final XContentType xContentType = randomFrom(XContentType.values()); + doFromXContentTestWithRandomFields(false); + } + + /** + * This test adds random fields and objects to the xContent rendered out to + * ensure we can parse it back to be forward compatible with additions to + * the xContent + */ + public void testFromXContentWithRandomFields() throws IOException { + doFromXContentTestWithRandomFields(true); + } + + private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { + final XContentType xContentType = randomFrom(XContentType.JSON); final Tuple tuple = randomUpdateResponse(xContentType); UpdateResponse updateResponse = tuple.v1(); UpdateResponse expectedUpdateResponse = tuple.v2(); boolean humanReadable = randomBoolean(); - BytesReference updateResponseBytes = toShuffledXContent(updateResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference originalBytes = toShuffledXContent(updateResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference mutated; + if (addRandomFields) { + // - The ShardInfo.Failure's exception is rendered out in a "reason" object. We shouldn't add anything random there + // because exception rendering and parsing are very permissive: any extra object or field would be rendered as + // a exception custom metadata and be parsed back as a custom header, making it impossible to compare the results + // in this test. 
+ // - The GetResult's "_source" and "fields" just consists of key/value pairs, we shouldn't add anything random there. + // It is already randomized in the randomGetResult() method anyway. Also, we cannot add anything within the "get" + // object since this is where GetResult's metadata fields are rendered out and they would be parsed back as + // extra metadata fields. + Predicate excludeFilter = path -> path.contains("reason") || path.contains("get"); + mutated = insertRandomFields(xContentType, originalBytes, excludeFilter, random()); + } else { + mutated = originalBytes; + } UpdateResponse parsedUpdateResponse; - try (XContentParser parser = createParser(xContentType.xContent(), updateResponseBytes)) { + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { parsedUpdateResponse = UpdateResponse.fromXContent(parser); assertNull(parser.nextToken()); } - // We can't use equals() to compare the original and the parsed delete response - // because the random delete response can contain shard failures with exceptions, - // and those exceptions are not parsed back with the same types. - assertUpdateResponse(expectedUpdateResponse, parsedUpdateResponse); - } + IndexResponseTests.assertDocWriteResponse(expectedUpdateResponse, parsedUpdateResponse); + if (addRandomFields == false) { + assertEquals(expectedUpdateResponse.getGetResult(), parsedUpdateResponse.getGetResult()); + } - public static void assertUpdateResponse(UpdateResponse expected, UpdateResponse actual) { - IndexResponseTests.assertDocWriteResponse(expected, actual); - assertEquals(expected.getGetResult(), actual.getGetResult()); + // Prints out the parsed UpdateResponse object to verify that it is the same as the expected output. + // If random fields have been inserted, it checks that they have been filtered out and that they do + // not alter the final output of the parsed object. + BytesReference parsedBytes = toXContent(parsedUpdateResponse, xContentType, humanReadable); + BytesReference expectedBytes = toXContent(expectedUpdateResponse, xContentType, humanReadable); + assertToXContentEquivalent(expectedBytes, parsedBytes, xContentType); } /** diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java deleted file mode 100644 index 21dd76b67e6..00000000000 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ /dev/null @@ -1,477 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.bwcompat; - -import org.apache.lucene.search.Explanation; -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.TestUtil; -import org.elasticsearch.Version; -import org.elasticsearch.VersionTests; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; -import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.gateway.MetaDataStateFormat; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.node.Node; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHitField; -import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.aggregations.AggregationBuilders; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.OldIndexUtils; -import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import org.hamcrest.Matchers; -import org.junit.AfterClass; -import org.junit.Before; - -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.SortedSet; -import java.util.TreeSet; - -import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery; -import static org.elasticsearch.test.OldIndexUtils.assertUpgradeWorks; -import static org.elasticsearch.test.OldIndexUtils.getIndexDir; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; - -// needs at least 2 nodes since it bumps replicas to 1 -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) -@LuceneTestCase.SuppressFileSystems("ExtrasFS") -public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { - // TODO: test for proper exception on unsupported indexes (maybe via separate test?) - // We have a 0.20.6.zip etc for this. 
- - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class); - } - - List indexes; - List unsupportedIndexes; - static String singleDataPathNodeName; - static String multiDataPathNodeName; - static Path singleDataPath; - static Path[] multiDataPath; - - @Before - public void initIndexesList() throws Exception { - indexes = OldIndexUtils.loadDataFilesList("index", getBwcIndicesPath()); - unsupportedIndexes = OldIndexUtils.loadDataFilesList("unsupported", getBwcIndicesPath()); - } - - @AfterClass - public static void tearDownStatics() { - singleDataPathNodeName = null; - multiDataPathNodeName = null; - singleDataPath = null; - multiDataPath = null; - } - - @Override - public Settings nodeSettings(int ord) { - return OldIndexUtils.getSettings(); - } - - void setupCluster() throws Exception { - List replicas = internalCluster().startNodes(1); // for replicas - - Path baseTempDir = createTempDir(); - // start single data path node - Settings.Builder nodeSettings = Settings.builder() - .put(Environment.PATH_DATA_SETTING.getKey(), baseTempDir.resolve("single-path").toAbsolutePath()) - .put(Node.NODE_MASTER_SETTING.getKey(), false); // workaround for dangling index loading issue when node is master - singleDataPathNodeName = internalCluster().startNode(nodeSettings); - - // start multi data path node - nodeSettings = Settings.builder() - .put(Environment.PATH_DATA_SETTING.getKey(), baseTempDir.resolve("multi-path1").toAbsolutePath() + "," + baseTempDir - .resolve("multi-path2").toAbsolutePath()) - .put(Node.NODE_MASTER_SETTING.getKey(), false); // workaround for dangling index loading issue when node is master - multiDataPathNodeName = internalCluster().startNode(nodeSettings); - - // find single data path dir - Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, singleDataPathNodeName).nodeDataPaths(); - assertEquals(1, nodePaths.length); - singleDataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER); - assertFalse(Files.exists(singleDataPath)); - Files.createDirectories(singleDataPath); - logger.info("--> Single data path: {}", singleDataPath); - - // find multi data path dirs - nodePaths = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNodeName).nodeDataPaths(); - assertEquals(2, nodePaths.length); - multiDataPath = new Path[]{nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER), - nodePaths[1].resolve(NodeEnvironment.INDICES_FOLDER)}; - assertFalse(Files.exists(multiDataPath[0])); - assertFalse(Files.exists(multiDataPath[1])); - Files.createDirectories(multiDataPath[0]); - Files.createDirectories(multiDataPath[1]); - logger.info("--> Multi data paths: {}, {}", multiDataPath[0], multiDataPath[1]); - ensureGreen(); - } - - void upgradeIndexFolder() throws Exception { - OldIndexUtils.upgradeIndexFolder(internalCluster(), singleDataPathNodeName); - OldIndexUtils.upgradeIndexFolder(internalCluster(), multiDataPathNodeName); - } - - void importIndex(String indexName) throws IOException { - // force reloading dangling indices with a cluster state republish - client().admin().cluster().prepareReroute().get(); - ensureGreen(indexName); - } - - void unloadIndex(String indexName) throws Exception { - assertAcked(client().admin().indices().prepareDelete(indexName).get()); - } - - public void testAllVersionsTested() throws Exception { - SortedSet expectedVersions = new TreeSet<>(); - for (Version v : VersionUtils.allReleasedVersions()) { - // The current version is in the "released" list even though it isn't 
released for historical reasons - if (v == Version.CURRENT) continue; - if (v.isRelease() == false) continue; // no guarantees for prereleases - if (v.before(Version.CURRENT.minimumIndexCompatibilityVersion())) continue; // we can only support one major version backward - if (v.equals(Version.CURRENT)) continue; // the current version is always compatible with itself - expectedVersions.add("index-" + v.toString() + ".zip"); - } - - for (String index : indexes) { - if (expectedVersions.remove(index) == false) { - logger.warn("Old indexes tests contain extra index: {}", index); - } - } - if (expectedVersions.isEmpty() == false) { - StringBuilder msg = new StringBuilder("Old index tests are missing indexes:"); - for (String expected : expectedVersions) { - msg.append("\n" + expected); - } - fail(msg.toString()); - } - } - - public void testOldIndexes() throws Exception { - setupCluster(); - - Collections.shuffle(indexes, random()); - for (String index : indexes) { - long startTime = System.currentTimeMillis(); - logger.info("--> Testing old index {}", index); - assertOldIndexWorks(index); - logger.info("--> Done testing {}, took {} seconds", index, (System.currentTimeMillis() - startTime) / 1000.0); - } - } - - void assertOldIndexWorks(String index) throws Exception { - Version version = OldIndexUtils.extractVersion(index); - Path[] paths; - if (randomBoolean()) { - logger.info("--> injecting index [{}] into single data path", index); - paths = new Path[]{singleDataPath}; - } else { - logger.info("--> injecting index [{}] into multi data path", index); - paths = multiDataPath; - } - - String indexName = index.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-"); - OldIndexUtils.loadIndex(indexName, index, createTempDir(), getBwcIndicesPath(), logger, paths); - // we explicitly upgrade the index folders as these indices - // are imported as dangling indices and not available on - // node startup - upgradeIndexFolder(); - importIndex(indexName); - assertAllSearchWorks(indexName); - assertBasicAggregationWorks(indexName); - assertRealtimeGetWorks(indexName); - assertNewReplicasWork(indexName); - assertUpgradeWorks(client(), indexName, version); - assertPositionIncrementGapDefaults(indexName, version); - assertAliasWithBadName(indexName, version); - assertStoredBinaryFields(indexName, version); - unloadIndex(indexName); - } - - boolean findPayloadBoostInExplanation(Explanation expl) { - if (expl.getDescription().startsWith("payloadBoost=") && expl.getValue() != 1f) { - return true; - } else { - boolean found = false; - for (Explanation sub : expl.getDetails()) { - found |= findPayloadBoostInExplanation(sub); - } - return found; - } - } - - void assertAllSearchWorks(String indexName) { - logger.info("--> testing _all search"); - SearchResponse searchRsp = client().prepareSearch(indexName).get(); - ElasticsearchAssertions.assertNoFailures(searchRsp); - assertThat(searchRsp.getHits().getTotalHits(), greaterThanOrEqualTo(1L)); - SearchHit bestHit = searchRsp.getHits().getAt(0); - - // Make sure there are payloads and they are taken into account for the score - // the 'string' field has a boost of 4 in the mappings so it should get a payload boost - String stringValue = (String) bestHit.getSourceAsMap().get("string"); - assertNotNull(stringValue); - Explanation explanation = client().prepareExplain(indexName, bestHit.getType(), bestHit.getId()) - .setQuery(QueryBuilders.matchQuery("_all", stringValue)).get().getExplanation(); - assertTrue("Could not find payload boost in 
explanation\n" + explanation, findPayloadBoostInExplanation(explanation)); - - // Make sure the query can run on the whole index - searchRsp = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("_all", stringValue)).setExplain(true).get(); - ElasticsearchAssertions.assertNoFailures(searchRsp); - assertThat(searchRsp.getHits().getTotalHits(), greaterThanOrEqualTo(1L)); - } - - void assertBasicAggregationWorks(String indexName) { - // histogram on a long - SearchResponse searchRsp = client().prepareSearch(indexName).addAggregation(AggregationBuilders.histogram("histo").field - ("long_sort").interval(10)).get(); - ElasticsearchAssertions.assertSearchResponse(searchRsp); - Histogram histo = searchRsp.getAggregations().get("histo"); - assertNotNull(histo); - long totalCount = 0; - for (Histogram.Bucket bucket : histo.getBuckets()) { - totalCount += bucket.getDocCount(); - } - assertEquals(totalCount, searchRsp.getHits().getTotalHits()); - - // terms on a boolean - searchRsp = client().prepareSearch(indexName).addAggregation(AggregationBuilders.terms("bool_terms").field("bool")).get(); - Terms terms = searchRsp.getAggregations().get("bool_terms"); - totalCount = 0; - for (Terms.Bucket bucket : terms.getBuckets()) { - totalCount += bucket.getDocCount(); - } - assertEquals(totalCount, searchRsp.getHits().getTotalHits()); - } - - void assertRealtimeGetWorks(String indexName) { - assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder() - .put("refresh_interval", -1) - .build())); - SearchRequestBuilder searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.matchAllQuery()); - SearchHit hit = searchReq.get().getHits().getAt(0); - String docId = hit.getId(); - // foo is new, it is not a field in the generated index - client().prepareUpdate(indexName, "doc", docId).setDoc(Requests.INDEX_CONTENT_TYPE, "foo", "bar").get(); - GetResponse getRsp = client().prepareGet(indexName, "doc", docId).get(); - Map source = getRsp.getSourceAsMap(); - assertThat(source, Matchers.hasKey("foo")); - - assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder() - .put("refresh_interval", IndexSettings.DEFAULT_REFRESH_INTERVAL) - .build())); - } - - void assertNewReplicasWork(String indexName) throws Exception { - final int numReplicas = 1; - final long startTime = System.currentTimeMillis(); - logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, indexName); - assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder() - .put("number_of_replicas", numReplicas) - ).execute().actionGet()); - ensureGreen(TimeValue.timeValueMinutes(2), indexName); - logger.debug("--> index [{}] is green, took [{}]", indexName, TimeValue.timeValueMillis(System.currentTimeMillis() - startTime)); - logger.debug("--> recovery status:\n{}", XContentHelper.toString(client().admin().indices().prepareRecoveries(indexName).get())); - - // TODO: do something with the replicas! query? index? 
- } - - void assertPositionIncrementGapDefaults(String indexName, Version version) throws Exception { - client().prepareIndex(indexName, "doc", "position_gap_test").setSource("string", Arrays.asList("one", "two three")) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); - - // Baseline - phrase query finds matches in the same field value - assertHitCount(client().prepareSearch(indexName).setQuery(matchPhraseQuery("string", "two three")).get(), 1); - - // No match across gaps when slop < position gap - assertHitCount( - client().prepareSearch(indexName).setQuery(matchPhraseQuery("string", "one two").slop(99)).get(), - 0); - - // Match across gaps when slop >= position gap - assertHitCount(client().prepareSearch(indexName).setQuery(matchPhraseQuery("string", "one two").slop(100)).get(), 1); - assertHitCount(client().prepareSearch(indexName).setQuery(matchPhraseQuery("string", "one two").slop(101)).get(), - 1); - - // No match across gap using default slop with default positionIncrementGap - assertHitCount(client().prepareSearch(indexName).setQuery(matchPhraseQuery("string", "one two")).get(), 0); - - // Nor with small-ish values - assertHitCount(client().prepareSearch(indexName).setQuery(matchPhraseQuery("string", "one two").slop(5)).get(), 0); - assertHitCount(client().prepareSearch(indexName).setQuery(matchPhraseQuery("string", "one two").slop(50)).get(), 0); - - // But huge-ish values still match - assertHitCount(client().prepareSearch(indexName).setQuery(matchPhraseQuery("string", "one two").slop(500)).get(), 1); - } - - private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0"); - - public void testUnreleasedVersion() { - VersionTests.assertUnknownVersion(VERSION_5_1_0_UNRELEASED); - } - - /** - * Search on an alias that contains illegal characters that would prevent it from being created after 5.1.0. It should still be - * search-able though. - */ - void assertAliasWithBadName(String indexName, Version version) throws Exception { - if (version.onOrAfter(VERSION_5_1_0_UNRELEASED)) { - return; - } - // We can read from the alias just like we can read from the index. - String aliasName = "#" + indexName; - long totalDocs = client().prepareSearch(indexName).setSize(0).get().getHits().getTotalHits(); - assertHitCount(client().prepareSearch(aliasName).setSize(0).get(), totalDocs); - assertThat(totalDocs, greaterThanOrEqualTo(2000L)); - - // We can remove the alias. - assertAcked(client().admin().indices().prepareAliases().removeAlias(indexName, aliasName).get()); - assertFalse(client().admin().indices().prepareAliasesExist(aliasName).get().exists()); - } - - /** - * Make sure we can load stored binary fields. 
- */ - void assertStoredBinaryFields(String indexName, Version version) throws Exception { - SearchRequestBuilder builder = client().prepareSearch(indexName); - builder.setQuery(QueryBuilders.matchAllQuery()); - builder.setSize(100); - builder.addStoredField("binary"); - SearchHits hits = builder.get().getHits(); - assertEquals(100, hits.getHits().length); - for(SearchHit hit : hits) { - SearchHitField field = hit.field("binary"); - assertNotNull(field); - Object value = field.getValue(); - assertTrue(value instanceof BytesArray); - assertEquals(16, ((BytesArray) value).length()); - } - } - - private Path getNodeDir(String indexFile) throws IOException { - Path unzipDir = createTempDir(); - Path unzipDataDir = unzipDir.resolve("data"); - - // decompress the index - Path backwardsIndex = getBwcIndicesPath().resolve(indexFile); - try (InputStream stream = Files.newInputStream(backwardsIndex)) { - TestUtil.unzip(stream, unzipDir); - } - - // check it is unique - assertTrue(Files.exists(unzipDataDir)); - Path[] list = FileSystemUtils.files(unzipDataDir); - if (list.length != 1) { - throw new IllegalStateException("Backwards index must contain exactly one cluster"); - } - - int zipIndex = indexFile.indexOf(".zip"); - final Version version = Version.fromString(indexFile.substring("index-".length(), zipIndex)); - if (version.before(Version.V_5_0_0_alpha1)) { - // the bwc scripts packs the indices under this path - return list[0].resolve("nodes/0/"); - } else { - // after 5.0.0, data folders do not include the cluster name - return list[0].resolve("0"); - } - } - - public void testOldClusterStates() throws Exception { - // dangling indices do not load the global state, only the per-index states - // so we make sure we can read them separately - MetaDataStateFormat globalFormat = new MetaDataStateFormat(XContentType.JSON, "global-") { - - @Override - public void toXContent(XContentBuilder builder, MetaData state) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public MetaData fromXContent(XContentParser parser) throws IOException { - return MetaData.Builder.fromXContent(parser); - } - }; - MetaDataStateFormat indexFormat = new MetaDataStateFormat(XContentType.JSON, "state-") { - - @Override - public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public IndexMetaData fromXContent(XContentParser parser) throws IOException { - return IndexMetaData.Builder.fromXContent(parser); - } - }; - Collections.shuffle(indexes, random()); - for (String indexFile : indexes) { - String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-"); - Path nodeDir = getNodeDir(indexFile); - logger.info("Parsing cluster state files from index [{}]", indexName); - final MetaData metaData = globalFormat.loadLatestState(logger, xContentRegistry(), nodeDir); - assertNotNull(metaData); - - final Version version = Version.fromString(indexName.substring("index-".length())); - final Path dataDir; - if (version.before(Version.V_5_0_0_alpha1)) { - dataDir = nodeDir.getParent().getParent(); - } else { - dataDir = nodeDir.getParent(); - } - final Path indexDir = getIndexDir(logger, indexName, indexFile, dataDir); - assertNotNull(indexFormat.loadLatestState(logger, xContentRegistry(), indexDir)); - } - } - -} diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java 
b/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java deleted file mode 100644 index 92c8b2315cc..00000000000 --- a/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.bwcompat; - -import org.elasticsearch.Version; -import org.elasticsearch.common.io.FileTestUtils; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; -import org.elasticsearch.snapshots.SnapshotId; -import org.elasticsearch.snapshots.SnapshotInfo; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; - -import java.nio.file.DirectoryStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Set; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; - -/** - * Tests that a repository can handle both snapshots of previous version formats and new version formats, - * as blob names and repository blob formats have changed between the snapshot versions. - */ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) -// this test sometimes fails in recovery when the recovery is reset, increasing the logging level to help debug -@TestLogging("org.elasticsearch.indices.recovery:DEBUG") -public class RepositoryUpgradabilityIT extends AbstractSnapshotIntegTestCase { - - /** - * This tests that a repository can inter-operate with snapshots that both have and don't have a UUID, - * namely when a repository was created in an older version with snapshots created in the old format - * (only snapshot name, no UUID) and then the repository is loaded into newer versions where subsequent - * snapshots have a name and a UUID. 
- */ - public void testRepositoryWorksWithCrossVersions() throws Exception { - final List repoVersions = listRepoVersions(); - // run the test for each supported version - for (final String version : repoVersions) { - final String repoName = "test-repo-" + version; - logger.info("--> creating repository [{}] for version [{}]", repoName, version); - createRepository(version, repoName); - - logger.info("--> get the snapshots"); - final String originalIndex = "index-" + version; - final Set indices = Sets.newHashSet(originalIndex); - final Set snapshotInfos = Sets.newHashSet(getSnapshots(repoName)); - assertThat(snapshotInfos.size(), equalTo(1)); - SnapshotInfo originalSnapshot = snapshotInfos.iterator().next(); - if (Version.fromString(version).before(Version.V_5_0_0_alpha1)) { - assertThat(originalSnapshot.snapshotId(), equalTo(new SnapshotId("test_1", "test_1"))); - } else { - assertThat(originalSnapshot.snapshotId().getName(), equalTo("test_1")); - assertNotNull(originalSnapshot.snapshotId().getUUID()); // it's a random UUID now - } - assertThat(Sets.newHashSet(originalSnapshot.indices()), equalTo(indices)); - - logger.info("--> restore the original snapshot"); - final Set restoredIndices = Sets.newHashSet( - restoreSnapshot(repoName, originalSnapshot.snapshotId().getName()) - ); - assertThat(restoredIndices, equalTo(indices)); - // make sure it has documents - for (final String searchIdx : restoredIndices) { - assertThat(client().prepareSearch(searchIdx).setSize(0).get().getHits().getTotalHits(), greaterThan(0L)); - } - deleteIndices(restoredIndices); // delete so we can restore again later - - final String snapshotName2 = "test_2"; - logger.info("--> take a new snapshot of the old index"); - final int addedDocSize = 10; - for (int i = 0; i < addedDocSize; i++) { - index(originalIndex, "doc", Integer.toString(i), "foo", "new-bar-" + i); - } - refresh(); - snapshotInfos.add(createSnapshot(repoName, snapshotName2)); - - logger.info("--> get the snapshots with the newly created snapshot [{}]", snapshotName2); - Set snapshotInfosFromRepo = Sets.newHashSet(getSnapshots(repoName)); - assertThat(snapshotInfosFromRepo, equalTo(snapshotInfos)); - snapshotInfosFromRepo.forEach(snapshotInfo -> { - assertThat(Sets.newHashSet(snapshotInfo.indices()), equalTo(indices)); - }); - - final String snapshotName3 = "test_3"; - final String indexName2 = "index2"; - logger.info("--> take a new snapshot with a new index"); - createIndex(indexName2); - indices.add(indexName2); - for (int i = 0; i < addedDocSize; i++) { - index(indexName2, "doc", Integer.toString(i), "foo", "new-bar-" + i); - } - refresh(); - snapshotInfos.add(createSnapshot(repoName, snapshotName3)); - - logger.info("--> get the snapshots with the newly created snapshot [{}]", snapshotName3); - snapshotInfosFromRepo = Sets.newHashSet(getSnapshots(repoName)); - assertThat(snapshotInfosFromRepo, equalTo(snapshotInfos)); - snapshotInfosFromRepo.forEach(snapshotInfo -> { - if (snapshotInfo.snapshotId().getName().equals(snapshotName3)) { - // only the last snapshot has all the indices - assertThat(Sets.newHashSet(snapshotInfo.indices()), equalTo(indices)); - } else { - assertThat(Sets.newHashSet(snapshotInfo.indices()), equalTo(Sets.newHashSet(originalIndex))); - } - }); - deleteIndices(indices); // clean up indices - - logger.info("--> restore the old snapshot again"); - Set oldRestoredIndices = Sets.newHashSet(restoreSnapshot(repoName, originalSnapshot.snapshotId().getName())); - assertThat(oldRestoredIndices, 
equalTo(Sets.newHashSet(originalIndex))); - for (final String searchIdx : oldRestoredIndices) { - assertThat(client().prepareSearch(searchIdx).setSize(0).get().getHits().getTotalHits(), - greaterThanOrEqualTo((long)addedDocSize)); - } - deleteIndices(oldRestoredIndices); - - logger.info("--> restore the new snapshot"); - Set newSnapshotIndices = Sets.newHashSet(restoreSnapshot(repoName, snapshotName3)); - assertThat(newSnapshotIndices, equalTo(Sets.newHashSet(originalIndex, indexName2))); - for (final String searchIdx : newSnapshotIndices) { - assertThat(client().prepareSearch(searchIdx).setSize(0).get().getHits().getTotalHits(), - greaterThanOrEqualTo((long)addedDocSize)); - } - deleteIndices(newSnapshotIndices); // clean up indices before starting again - } - } - - private List listRepoVersions() throws Exception { - final String prefix = "repo"; - final List repoVersions = new ArrayList<>(); - final Path repoFiles = getBwcIndicesPath(); - try (DirectoryStream dirStream = Files.newDirectoryStream(repoFiles, prefix + "-*.zip")) { - for (final Path entry : dirStream) { - final String fileName = entry.getFileName().toString(); - String version = fileName.substring(prefix.length() + 1); - version = version.substring(0, version.length() - ".zip".length()); - repoVersions.add(version); - } - } - return Collections.unmodifiableList(repoVersions); - } - - private void createRepository(final String version, final String repoName) throws Exception { - final String prefix = "repo"; - final Path repoFile = getBwcIndicesPath().resolve(prefix + "-" + version + ".zip"); - final Path repoPath = randomRepoPath(); - FileTestUtils.unzip(repoFile, repoPath, "repo/"); - assertAcked(client().admin().cluster().preparePutRepository(repoName) - .setType("fs") - .setSettings(Settings.builder().put("location", repoPath))); - } - - private List getSnapshots(final String repoName) throws Exception { - return client().admin().cluster().prepareGetSnapshots(repoName) - .addSnapshots("_all") - .get() - .getSnapshots(); - } - - private SnapshotInfo createSnapshot(final String repoName, final String snapshotName) throws Exception { - return client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName) - .setWaitForCompletion(true) - .get() - .getSnapshotInfo(); - } - - private List restoreSnapshot(final String repoName, final String snapshotName) throws Exception { - return client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName) - .setWaitForCompletion(true) - .get() - .getRestoreInfo() - .indices(); - } - - private void deleteIndices(final Set indices) throws Exception { - client().admin().indices().prepareDelete(indices.toArray(new String[indices.size()])).get(); - } - -} diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java index 9ee8fa654b2..cd1a1336433 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java @@ -18,27 +18,15 @@ */ package org.elasticsearch.bwcompat; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; -import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import 
org.elasticsearch.cluster.metadata.IndexTemplateMetaData; -import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.common.io.FileTestUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.fs.FsRepository; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; -import org.elasticsearch.snapshots.RestoreInfo; -import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotRestoreException; import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.VersionUtils; import org.junit.BeforeClass; import java.io.IOException; @@ -46,19 +34,15 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; -import java.util.Collections; import java.util.List; -import java.util.Locale; -import java.util.SortedSet; -import java.util.TreeSet; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.notNullValue; +/** + * Tests that restoring from a very old snapshot fails appropriately. + */ @ClusterScope(scope = Scope.TEST) public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { @@ -77,40 +61,6 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { repoPath = createTempDir("repositories"); } - public void testRestoreOldSnapshots() throws Exception { - String repo = "test_repo"; - String snapshot = "test_1"; - List repoVersions = repoVersions(); - assertThat(repoVersions.size(), greaterThan(0)); - for (String version : repoVersions) { - createRepo("repo", version, repo); - testOldSnapshot(version, repo, snapshot); - } - - SortedSet expectedVersions = new TreeSet<>(); - for (Version v : VersionUtils.allReleasedVersions()) { - // The current version is in the "released" list even though it isn't released for historical reasons - if (v == Version.CURRENT) continue; - if (v.isRelease() == false) continue; // no guarantees for prereleases - if (v.before(Version.CURRENT.minimumIndexCompatibilityVersion())) continue; // we only support versions N and N-1 - if (v.equals(Version.CURRENT)) continue; // the current version is always compatible with itself - expectedVersions.add(v.toString()); - } - - for (String repoVersion : repoVersions) { - if (expectedVersions.remove(repoVersion) == false) { - logger.warn("Old repositories tests contain extra repo: {}", repoVersion); - } - } - if (expectedVersions.isEmpty() == false) { - StringBuilder msg = new StringBuilder("Old repositories tests are missing versions:"); - for (String expected : expectedVersions) { - msg.append("\n" + expected); - } - fail(msg.toString()); - } - } - public void testRestoreUnsupportedSnapshots() throws Exception { String repo = "test_repo"; String snapshot = "test_1"; @@ -122,10 +72,6 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { } } - private List repoVersions() throws Exception { - return listRepoVersions("repo"); - } - private List unsupportedRepoVersions() throws Exception { return 
listRepoVersions("unsupportedrepo"); } @@ -155,65 +101,6 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { .put(FsRepository.REPOSITORIES_LOCATION_SETTING.getKey(), fsRepoPath.getParent().relativize(fsRepoPath).resolve("repo").toString()))); } - private void testOldSnapshot(String version, String repo, String snapshot) throws IOException { - logger.info("--> get snapshot and check its version"); - GetSnapshotsResponse getSnapshotsResponse = client().admin().cluster().prepareGetSnapshots(repo).setSnapshots(snapshot).get(); - assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1)); - SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0); - assertThat(snapshotInfo.version().toString(), equalTo(version)); - - logger.info("--> get less verbose snapshot info"); - getSnapshotsResponse = client().admin().cluster().prepareGetSnapshots(repo) - .setSnapshots(snapshot).setVerbose(false).get(); - assertEquals(1, getSnapshotsResponse.getSnapshots().size()); - snapshotInfo = getSnapshotsResponse.getSnapshots().get(0); - assertEquals(snapshot, snapshotInfo.snapshotId().getName()); - assertNull(snapshotInfo.version()); // in verbose=false mode, version doesn't exist - - logger.info("--> restoring snapshot"); - RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot(repo, snapshot).setRestoreGlobalState(true).setWaitForCompletion(true).get(); - assertThat(response.status(), equalTo(RestStatus.OK)); - RestoreInfo restoreInfo = response.getRestoreInfo(); - assertThat(restoreInfo.successfulShards(), greaterThan(0)); - assertThat(restoreInfo.successfulShards(), equalTo(restoreInfo.totalShards())); - assertThat(restoreInfo.failedShards(), equalTo(0)); - String index = restoreInfo.indices().get(0); - - logger.info("--> check search"); - SearchResponse searchResponse = client().prepareSearch(index).get(); - assertThat(searchResponse.getHits().getTotalHits(), greaterThan(1L)); - - logger.info("--> check settings"); - ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - assertThat(clusterState.metaData().persistentSettings().get(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "version_attr"), equalTo(version)); - - logger.info("--> check templates"); - IndexTemplateMetaData template = clusterState.getMetaData().templates().get("template_" + version.toLowerCase(Locale.ROOT)); - assertThat(template, notNullValue()); - assertThat(template.patterns(), equalTo(Collections.singletonList("te*"))); - assertThat(template.settings().getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1), equalTo(1)); - assertThat(template.mappings().size(), equalTo(1)); - assertThat(template.mappings().get("type1").string(), - anyOf( - equalTo("{\"type1\":{\"_source\":{\"enabled\":false}}}"), - equalTo("{\"type1\":{\"_source\":{\"enabled\":\"false\"}}}"), - equalTo("{\"type1\":{\"_source\":{\"enabled\":\"0\"}}}"), - equalTo("{\"type1\":{\"_source\":{\"enabled\":0}}}"), - equalTo("{\"type1\":{\"_source\":{\"enabled\":\"off\"}}}"), - equalTo("{\"type1\":{\"_source\":{\"enabled\":\"no\"}}}") - )); - assertThat(template.aliases().size(), equalTo(3)); - assertThat(template.aliases().get("alias1"), notNullValue()); - assertThat(template.aliases().get("alias2").filter().string(), containsString(version)); - assertThat(template.aliases().get("alias2").indexRouting(), equalTo("kimchy")); - assertThat(template.aliases().get("{index}-alias"), notNullValue()); - - logger.info("--> cleanup"); - 
cluster().wipeIndices(restoreInfo.indices().toArray(new String[restoreInfo.indices().size()])); - cluster().wipeTemplates(); - - } - private void assertUnsupportedIndexFailsToRestore(String repo, String snapshot) throws IOException { logger.info("--> restoring unsupported snapshot"); try { diff --git a/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java index 142216bf2dd..dbe85898209 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++ b/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java @@ -41,6 +41,7 @@ import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportServiceAdapter; +import org.elasticsearch.transport.TransportStats; import java.io.IOException; import java.net.UnknownHostException; @@ -193,11 +194,6 @@ abstract class FailAndRetryMockTransport imp } - @Override - public long serverOpen() { - return 0; - } - @Override public Lifecycle.State lifecycleState() { return null; @@ -231,4 +227,9 @@ abstract class FailAndRetryMockTransport imp public long newRequestId() { return requestId.incrementAndGet(); } + + @Override + public TransportStats getStats() { + throw new UnsupportedOperationException(); + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 3fc67f3eb0e..a923b331042 100644 --- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -158,12 +158,9 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { } internalCluster().stopRandomNonMasterNode(); - assertBusy(new Runnable() { - @Override - public void run() { - ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); - assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true)); - } + assertBusy(() -> { + ClusterState state1 = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); + assertThat(state1.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true)); }); logger.info("--> starting the previous master node again..."); @@ -191,6 +188,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/25415") public void testMultipleNodesShutdownNonMasterNodes() throws Exception { Settings settings = Settings.builder() .put("discovery.zen.minimum_master_nodes", 3) @@ -405,12 +403,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { latch.await(); assertThat(failure.get(), instanceOf(Discovery.FailedToCommitClusterStateException.class)); - assertBusy(new Runnable() { - @Override - public void run() { - assertThat(masterClusterService.state().nodes().getMasterNode(), nullValue()); - } - }); + assertBusy(() -> assertThat(masterClusterService.state().nodes().getMasterNode(), nullValue())); partition.stopDisrupting(); diff --git a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index a1b80803e0c..2e7a857cc7b 100644 
--- a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportServiceAdapter; +import org.elasticsearch.transport.TransportStats; import org.junit.After; import org.junit.Before; @@ -241,11 +242,6 @@ public class NodeConnectionsServiceTests extends ESTestCase { return getConnection(node); } - @Override - public long serverOpen() { - return 0; - } - @Override public List getLocalAddresses() { return null; @@ -263,12 +259,10 @@ public class NodeConnectionsServiceTests extends ESTestCase { @Override public void addLifecycleListener(LifecycleListener listener) { - } @Override public void removeLifecycleListener(LifecycleListener listener) { - } @Override @@ -279,5 +273,10 @@ public class NodeConnectionsServiceTests extends ESTestCase { @Override public void close() {} + + @Override + public TransportStats getStats() { + throw new UnsupportedOperationException(); + } } } diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java index 7b11f96ac4d..fa56c756fcc 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java @@ -32,6 +32,9 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.Set; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + public class IndexMetaDataTests extends ESTestCase { public void testIndexMetaDataSerialization() throws IOException { @@ -121,4 +124,36 @@ public class IndexMetaDataTests extends ESTestCase { assertEquals("the number of target shards (8) must be greater than the shard id: 8", expectThrows(IllegalArgumentException.class, () -> IndexMetaData.selectShrinkShards(8, metaData, 8)).getMessage()); } + + public void testIndexFormat() { + Settings defaultSettings = Settings.builder() + .put("index.version.created", 1) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + .build(); + + // matching version + { + IndexMetaData metaData = IndexMetaData.builder("foo") + .settings(Settings.builder() + .put(defaultSettings) + // intentionally not using the constant, so upgrading requires you to look at this test + // where you have to update this part and the next one + .put("index.format", 6) + .build()) + .build(); + + assertThat(metaData.getSettings().getAsInt(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 0), is(6)); + } + + // no setting configured + { + IndexMetaData metaData = IndexMetaData.builder("foo") + .settings(Settings.builder() + .put(defaultSettings) + .build()) + .build(); + assertThat(metaData.getSettings().getAsInt(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 0), is(0)); + } + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index 31e421769c2..4ad4de495ca 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ 
-20,6 +20,8 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -1025,21 +1027,99 @@ public class IndexNameExpressionResolverTests extends ESTestCase { assertArrayEquals(new String[] {"test-alias-0", "test-alias-1", "test-alias-non-filtering"}, strings); } - public void testConcreteIndicesForDeprecatedPattern() { + public void testDeleteIndexIgnoresAliases() { MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("testXXX").state(State.OPEN)) - .put(indexBuilder("testXXY").state(State.OPEN)) - .put(indexBuilder("testYYY").state(State.OPEN)); + .put(indexBuilder("test-index").state(State.OPEN) + .putAlias(AliasMetaData.builder("test-alias"))) + .put(indexBuilder("index").state(State.OPEN) + .putAlias(AliasMetaData.builder("test-alias2"))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + { + String[] indices = indexNameExpressionResolver.concreteIndexNames(state, new DeleteIndexRequest("test-alias")); + assertEquals(0, indices.length); + } + { + String[] indices = indexNameExpressionResolver.concreteIndexNames(state, new DeleteIndexRequest("test-a*")); + assertEquals(0, indices.length); + } + { + String[] indices = indexNameExpressionResolver.concreteIndexNames(state, new DeleteIndexRequest("test-index")); + assertEquals(1, indices.length); + assertEquals("test-index", indices[0]); + } + { + String[] indices = indexNameExpressionResolver.concreteIndexNames(state, new DeleteIndexRequest("test-*")); + assertEquals(1, indices.length); + assertEquals("test-index", indices[0]); + } + } - IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, - IndicesOptions.fromOptions(true, true, true, true)); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "+testX*")), - equalTo(newHashSet("testXXX", "testXXY"))); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "+testXXX", "+testXXY", "+testYYY", "-testYYY")), - equalTo(newHashSet("testXXX", "testXXY", "testYYY"))); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "+testXX*", "+testY*")), - equalTo(newHashSet("testXXX", "testXXY", "testYYY"))); - assertWarnings("support for '+' as part of index expressions is deprecated"); + public void testIndicesAliasesRequestIgnoresAliases() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("test-index").state(State.OPEN) + .putAlias(AliasMetaData.builder("test-alias"))) + .put(indexBuilder("index").state(State.OPEN) + .putAlias(AliasMetaData.builder("test-alias2"))); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + { + IndicesAliasesRequest.AliasActions aliasActions = IndicesAliasesRequest.AliasActions.add().index("test-alias"); + expectThrows(IndexNotFoundException.class, () -> indexNameExpressionResolver.concreteIndexNames(state, aliasActions)); + } + { + IndicesAliasesRequest.AliasActions aliasActions = IndicesAliasesRequest.AliasActions.add().index("test-a*"); + expectThrows(IndexNotFoundException.class, () -> indexNameExpressionResolver.concreteIndexNames(state, aliasActions)); + } + { + IndicesAliasesRequest.AliasActions 
aliasActions = IndicesAliasesRequest.AliasActions.add().index("test-index"); + String[] indices = indexNameExpressionResolver.concreteIndexNames(state, aliasActions); + assertEquals(1, indices.length); + assertEquals("test-index", indices[0]); + } + { + IndicesAliasesRequest.AliasActions aliasActions = IndicesAliasesRequest.AliasActions.add().index("test-*"); + String[] indices = indexNameExpressionResolver.concreteIndexNames(state, aliasActions); + assertEquals(1, indices.length); + assertEquals("test-index", indices[0]); + } + { + IndicesAliasesRequest.AliasActions aliasActions = IndicesAliasesRequest.AliasActions.remove().index("test-alias"); + expectThrows(IndexNotFoundException.class, () -> indexNameExpressionResolver.concreteIndexNames(state, aliasActions)); + } + { + IndicesAliasesRequest.AliasActions aliasActions = IndicesAliasesRequest.AliasActions.remove().index("test-a*"); + expectThrows(IndexNotFoundException.class, () -> indexNameExpressionResolver.concreteIndexNames(state, aliasActions)); + } + { + IndicesAliasesRequest.AliasActions aliasActions = IndicesAliasesRequest.AliasActions.remove().index("test-index"); + String[] indices = indexNameExpressionResolver.concreteIndexNames(state, aliasActions); + assertEquals(1, indices.length); + assertEquals("test-index", indices[0]); + } + { + IndicesAliasesRequest.AliasActions aliasActions = IndicesAliasesRequest.AliasActions.remove().index("test-*"); + String[] indices = indexNameExpressionResolver.concreteIndexNames(state, aliasActions); + assertEquals(1, indices.length); + assertEquals("test-index", indices[0]); + } + { + IndicesAliasesRequest.AliasActions aliasActions = IndicesAliasesRequest.AliasActions.removeIndex().index("test-alias"); + expectThrows(IndexNotFoundException.class, () -> indexNameExpressionResolver.concreteIndexNames(state, aliasActions)); + } + { + IndicesAliasesRequest.AliasActions aliasActions = IndicesAliasesRequest.AliasActions.removeIndex().index("test-a*"); + expectThrows(IndexNotFoundException.class, () -> indexNameExpressionResolver.concreteIndexNames(state, aliasActions)); + } + { + IndicesAliasesRequest.AliasActions aliasActions = IndicesAliasesRequest.AliasActions.removeIndex().index("test-index"); + String[] indices = indexNameExpressionResolver.concreteIndexNames(state, aliasActions); + assertEquals(1, indices.length); + assertEquals("test-index", indices[0]); + } + { + IndicesAliasesRequest.AliasActions aliasActions = IndicesAliasesRequest.AliasActions.removeIndex().index("test-*"); + String[] indices = indexNameExpressionResolver.concreteIndexNames(state, aliasActions); + assertEquals(1, indices.length); + assertEquals("test-index", indices[0]); + } } } diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java index 13f7549973d..3cce782a898 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; @@ -27,8 +28,11 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import 
org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; +import java.util.Collection; import java.util.Collections; import static org.hamcrest.Matchers.equalTo; @@ -36,6 +40,11 @@ import static org.hamcrest.Matchers.is; public class MetaDataMappingServiceTests extends ESSingleNodeTestCase { + @Override + protected Collection> getPlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + // Tests _parent meta field logic, because part of the validation is in MetaDataMappingService public void testAddChildTypePointingToAlreadyExistingType() throws Exception { createIndex("test", Settings.EMPTY, "type", "field", "type=keyword"); @@ -54,7 +63,7 @@ public class MetaDataMappingServiceTests extends ESSingleNodeTestCase { // Tests _parent meta field logic, because part of the validation is in MetaDataMappingService public void testAddExtraChildTypePointingToAlreadyParentExistingType() throws Exception { IndexService indexService = createIndex("test", client().admin().indices().prepareCreate("test") - .setSettings("index.mapping.single_type", false) + .setSettings("index.version.created", Version.V_5_6_0.id) .addMapping("parent") .addMapping("child1", "_parent", "type=parent") ); diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java new file mode 100644 index 00000000000..36625284d47 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java @@ -0,0 +1,190 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.UnaryOperator; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) +public class TemplateUpgradeServiceIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(TestPlugin.class); + } + + public static class TestPlugin extends Plugin { + // This setting is used to simulate cluster state updates + static final Setting UPDATE_TEMPLATE_DUMMY_SETTING = + Setting.intSetting("tests.update_template_count", 0, Setting.Property.NodeScope, Setting.Property.Dynamic); + + protected final Logger logger; + protected final Settings settings; + + public TestPlugin(Settings settings) { + this.logger = Loggers.getLogger(getClass(), settings); + this.settings = settings; + } + + @Override + public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, + NamedXContentRegistry xContentRegistry) { + clusterService.getClusterSettings().addSettingsUpdateConsumer(UPDATE_TEMPLATE_DUMMY_SETTING, integer -> { + logger.debug("the template dummy setting was updated to {}", integer); + }); + return super.createComponents(client, clusterService, threadPool, resourceWatcherService, scriptService, xContentRegistry); + } + + @Override + public UnaryOperator> getIndexTemplateMetaDataUpgrader() { + return templates -> { + templates.put("test_added_template", IndexTemplateMetaData.builder("test_added_template") + .patterns(Collections.singletonList("*")).build()); + templates.remove("test_removed_template"); + templates.put("test_changed_template", IndexTemplateMetaData.builder("test_changed_template").order(10) + .patterns(Collections.singletonList("*")).build()); + return templates; + }; + } + + @Override + public List> getSettings() { + return Collections.singletonList(UPDATE_TEMPLATE_DUMMY_SETTING); + } + } + + + public void testTemplateUpdate() throws Exception { + assertTemplates(); + + // Change some templates + assertAcked(client().admin().indices().preparePutTemplate("test_dummy_template").setOrder(0) + .setPatterns(Collections.singletonList("*")).get()); + assertAcked(client().admin().indices().preparePutTemplate("test_changed_template").setOrder(0) + .setPatterns(Collections.singletonList("*")).get()); + assertAcked(client().admin().indices().preparePutTemplate("test_removed_template").setOrder(1) + .setPatterns(Collections.singletonList("*")).get()); + + AtomicInteger updateCount = new AtomicInteger(); + // Wait for 
the templates to be updated back to normal + assertBusy(() -> { + // the updates only happen on cluster state updates, so we need to make sure that the cluster state updates are happening + // so we need to simulate updates to make sure the template upgrade kicks in + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings( + Settings.builder().put(TestPlugin.UPDATE_TEMPLATE_DUMMY_SETTING.getKey(), updateCount.incrementAndGet()) + ).get()); + List templates = client().admin().indices().prepareGetTemplates("test_*").get().getIndexTemplates(); + assertThat(templates, hasSize(3)); + boolean addedFound = false; + boolean changedFound = false; + boolean dummyFound = false; + for (int i = 0; i < 3; i++) { + IndexTemplateMetaData templateMetaData = templates.get(i); + switch (templateMetaData.getName()) { + case "test_added_template": + assertFalse(addedFound); + addedFound = true; + break; + case "test_changed_template": + assertFalse(changedFound); + changedFound = true; + assertThat(templateMetaData.getOrder(), equalTo(10)); + break; + case "test_dummy_template": + assertFalse(dummyFound); + dummyFound = true; + break; + default: + fail("unexpected template " + templateMetaData.getName()); + break; + } + } + assertTrue(addedFound); + assertTrue(changedFound); + assertTrue(dummyFound); + }); + + // Wipe out all templates + assertAcked(client().admin().indices().prepareDeleteTemplate("test_*").get()); + + assertTemplates(); + + } + + private void assertTemplates() throws Exception { + AtomicInteger updateCount = new AtomicInteger(); + // Make sure all templates are recreated correctly + assertBusy(() -> { + // the updates only happen on cluster state updates, so we need to make sure that the cluster state updates are happening + // so we need to simulate updates to make sure the template upgrade kicks in + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings( + Settings.builder().put(TestPlugin.UPDATE_TEMPLATE_DUMMY_SETTING.getKey(), updateCount.incrementAndGet()) + ).get()); + + List templates = client().admin().indices().prepareGetTemplates("test_*").get().getIndexTemplates(); + assertThat(templates, hasSize(2)); + boolean addedFound = false; + boolean changedFound = false; + for (int i = 0; i < 2; i++) { + IndexTemplateMetaData templateMetaData = templates.get(i); + switch (templateMetaData.getName()) { + case "test_added_template": + assertFalse(addedFound); + addedFound = true; + break; + case "test_changed_template": + assertFalse(changedFound); + changedFound = true; + assertThat(templateMetaData.getOrder(), equalTo(10)); + break; + default: + fail("unexpected template " + templateMetaData.getName()); + break; + } + } + + assertTrue(addedFound); + assertTrue(changedFound); + }); + } + +} diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java new file mode 100644 index 00000000000..f4e8ba21fc0 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java @@ -0,0 +1,438 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.test.VersionUtils.randomVersion; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.CoreMatchers.startsWith; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + + +public class TemplateUpgradeServiceTests extends ESTestCase { + + private final ClusterService clusterService = new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null); + + public void testCalculateChangesAddChangeAndDelete() { + + boolean shouldAdd = randomBoolean(); + boolean shouldRemove = randomBoolean(); + boolean shouldChange = randomBoolean(); + + MetaData metaData = randomMetaData( + IndexTemplateMetaData.builder("user_template").build(), + IndexTemplateMetaData.builder("removed_test_template").build(), + IndexTemplateMetaData.builder("changed_test_template").build() + ); + + TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, null, clusterService, null, + 
Arrays.asList( + templates -> { + if (shouldAdd) { + assertNull(templates.put("added_test_template", IndexTemplateMetaData.builder("added_test_template").build())); + } + return templates; + }, + templates -> { + if (shouldRemove) { + assertNotNull(templates.remove("removed_test_template")); + } + return templates; + }, + templates -> { + if (shouldChange) { + assertNotNull(templates.put("changed_test_template", + IndexTemplateMetaData.builder("changed_test_template").order(10).build())); + } + return templates; + } + )); + + Optional<Tuple<Map<String, BytesReference>, Set<String>>> optChanges = + service.calculateTemplateChanges(metaData.templates()); + + if (shouldAdd || shouldRemove || shouldChange) { + Tuple<Map<String, BytesReference>, Set<String>> changes = optChanges.orElseThrow(() -> + new AssertionError("Should have non empty changes")); + if (shouldAdd) { + assertThat(changes.v1().get("added_test_template"), notNullValue()); + if (shouldChange) { + assertThat(changes.v1().keySet(), hasSize(2)); + assertThat(changes.v1().get("changed_test_template"), notNullValue()); + } else { + assertThat(changes.v1().keySet(), hasSize(1)); + } + } else { + if (shouldChange) { + assertThat(changes.v1().get("changed_test_template"), notNullValue()); + assertThat(changes.v1().keySet(), hasSize(1)); + } else { + assertThat(changes.v1().keySet(), empty()); + } + } + + if (shouldRemove) { + assertThat(changes.v2(), hasSize(1)); + assertThat(changes.v2().contains("removed_test_template"), equalTo(true)); + } else { + assertThat(changes.v2(), empty()); + } + } else { + assertThat(optChanges.isPresent(), equalTo(false)); + } + } + + + @SuppressWarnings("unchecked") + public void testUpdateTemplates() { + int additionsCount = randomIntBetween(0, 5); + int deletionsCount = randomIntBetween(0, 3); + + List<ActionListener<PutIndexTemplateResponse>> putTemplateListeners = new ArrayList<>(); + List<ActionListener<DeleteIndexTemplateResponse>> deleteTemplateListeners = new ArrayList<>(); + + Client mockClient = mock(Client.class); + AdminClient mockAdminClient = mock(AdminClient.class); + IndicesAdminClient mockIndicesAdminClient = mock(IndicesAdminClient.class); + when(mockClient.admin()).thenReturn(mockAdminClient); + when(mockAdminClient.indices()).thenReturn(mockIndicesAdminClient); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + PutIndexTemplateRequest request = (PutIndexTemplateRequest) args[0]; + assertThat(request.name(), equalTo("add_template_" + request.order())); + putTemplateListeners.add((ActionListener) args[1]); + return null; + }).when(mockIndicesAdminClient).putTemplate(any(PutIndexTemplateRequest.class), any(ActionListener.class)); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + DeleteIndexTemplateRequest request = (DeleteIndexTemplateRequest) args[0]; + assertThat(request.name(), startsWith("remove_template_")); + deleteTemplateListeners.add((ActionListener) args[1]); + return null; + }).when(mockIndicesAdminClient).deleteTemplate(any(DeleteIndexTemplateRequest.class), any(ActionListener.class)); + + Set<String> deletions = new HashSet<>(deletionsCount); + for (int i = 0; i < deletionsCount; i++) { + deletions.add("remove_template_" + i); + } + Map<String, BytesReference> additions = new HashMap<>(additionsCount); + for (int i = 0; i < additionsCount; i++) { + additions.put("add_template_" + i, new BytesArray("{\"index_patterns\" : \"*\", \"order\" : " + i + "}")); + } + + TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, null, + Collections.emptyList()); + + service.updateTemplates(additions, deletions); + int
updatesInProgress = service.getUpdatesInProgress(); + + assertThat(putTemplateListeners, hasSize(additionsCount)); + assertThat(deleteTemplateListeners, hasSize(deletionsCount)); + + for (int i = 0; i < additionsCount; i++) { + if (randomBoolean()) { + putTemplateListeners.get(i).onFailure(new RuntimeException("test - ignore")); + } else { + putTemplateListeners.get(i).onResponse(new PutIndexTemplateResponse(randomBoolean()) { + + }); + } + } + + for (int i = 0; i < deletionsCount; i++) { + if (randomBoolean()) { + int prevUpdatesInProgress = service.getUpdatesInProgress(); + deleteTemplateListeners.get(i).onFailure(new RuntimeException("test - ignore")); + assertThat(prevUpdatesInProgress - service.getUpdatesInProgress(), equalTo(1)); + } else { + int prevUpdatesInProgress = service.getUpdatesInProgress(); + deleteTemplateListeners.get(i).onResponse(new DeleteIndexTemplateResponse(randomBoolean()) { + + }); + assertThat(prevUpdatesInProgress - service.getUpdatesInProgress(), equalTo(1)); + } + } + assertThat(updatesInProgress - service.getUpdatesInProgress(), equalTo(additionsCount + deletionsCount)); + } + + private static final Set<DiscoveryNode.Role> MASTER_DATA_ROLES = + Collections.unmodifiableSet(EnumSet.of(DiscoveryNode.Role.MASTER, DiscoveryNode.Role.DATA)); + + @SuppressWarnings("unchecked") + public void testClusterStateUpdate() { + + AtomicReference<ActionListener<PutIndexTemplateResponse>> addedListener = new AtomicReference<>(); + AtomicReference<ActionListener<PutIndexTemplateResponse>> changedListener = new AtomicReference<>(); + AtomicReference<ActionListener<DeleteIndexTemplateResponse>> removedListener = new AtomicReference<>(); + AtomicInteger updateInvocation = new AtomicInteger(); + + MetaData metaData = randomMetaData( + IndexTemplateMetaData.builder("user_template").build(), + IndexTemplateMetaData.builder("removed_test_template").build(), + IndexTemplateMetaData.builder("changed_test_template").build() + ); + + ThreadPool threadPool = mock(ThreadPool.class); + ExecutorService executorService = mock(ExecutorService.class); + when(threadPool.generic()).thenReturn(executorService); + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 1; + Runnable runnable = (Runnable) args[0]; + runnable.run(); + updateInvocation.incrementAndGet(); + return null; + }).when(executorService).execute(any(Runnable.class)); + + Client mockClient = mock(Client.class); + AdminClient mockAdminClient = mock(AdminClient.class); + IndicesAdminClient mockIndicesAdminClient = mock(IndicesAdminClient.class); + when(mockClient.admin()).thenReturn(mockAdminClient); + when(mockAdminClient.indices()).thenReturn(mockIndicesAdminClient); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + PutIndexTemplateRequest request = (PutIndexTemplateRequest) args[0]; + if (request.name().equals("added_test_template")) { + assertThat(addedListener.getAndSet((ActionListener) args[1]), nullValue()); + } else if (request.name().equals("changed_test_template")) { + assertThat(changedListener.getAndSet((ActionListener) args[1]), nullValue()); + } else { + fail("unexpected put template call for " + request.name()); + } + return null; + }).when(mockIndicesAdminClient).putTemplate(any(PutIndexTemplateRequest.class), any(ActionListener.class)); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + DeleteIndexTemplateRequest request = (DeleteIndexTemplateRequest) args[0]; + assertThat(request.name(), startsWith("removed_test_template")); + assertThat(removedListener.getAndSet((ActionListener) args[1]), nullValue()); + return
null; + }).when(mockIndicesAdminClient).deleteTemplate(any(DeleteIndexTemplateRequest.class), any(ActionListener.class)); + + TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool, + Arrays.asList( + templates -> { + assertNull(templates.put("added_test_template", IndexTemplateMetaData.builder("added_test_template") + .patterns(Collections.singletonList("*")).build())); + return templates; + }, + templates -> { + assertNotNull(templates.remove("removed_test_template")); + return templates; + }, + templates -> { + assertNotNull(templates.put("changed_test_template", IndexTemplateMetaData.builder("changed_test_template") + .patterns(Collections.singletonList("*")).order(10).build())); + return templates; + } + )); + + ClusterState prevState = ClusterState.EMPTY_STATE; + ClusterState state = ClusterState.builder(prevState).nodes(DiscoveryNodes.builder() + .add(new DiscoveryNode("node1", "node1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT) + ).localNodeId("node1").masterNodeId("node1").build() + ).metaData(metaData).build(); + service.clusterChanged(new ClusterChangedEvent("test", state, prevState)); + + assertThat(updateInvocation.get(), equalTo(1)); + assertThat(addedListener.get(), notNullValue()); + assertThat(changedListener.get(), notNullValue()); + assertThat(removedListener.get(), notNullValue()); + + prevState = state; + state = ClusterState.builder(prevState).metaData(MetaData.builder(state.metaData()).removeTemplate("user_template")).build(); + service.clusterChanged(new ClusterChangedEvent("test 2", state, prevState)); + + // Make sure that update wasn't invoked since we are still running + assertThat(updateInvocation.get(), equalTo(1)); + + addedListener.getAndSet(null).onResponse(new PutIndexTemplateResponse(true) { + }); + changedListener.getAndSet(null).onResponse(new PutIndexTemplateResponse(true) { + }); + removedListener.getAndSet(null).onResponse(new DeleteIndexTemplateResponse(true) { + }); + + service.clusterChanged(new ClusterChangedEvent("test 3", state, prevState)); + + // Make sure that update was called this time since we are no longer running + assertThat(updateInvocation.get(), equalTo(2)); + + addedListener.getAndSet(null).onFailure(new RuntimeException("test - ignore")); + changedListener.getAndSet(null).onFailure(new RuntimeException("test - ignore")); + removedListener.getAndSet(null).onFailure(new RuntimeException("test - ignore")); + + service.clusterChanged(new ClusterChangedEvent("test 3", state, prevState)); + + // Make sure that update wasn't called this time since the index template metadata didn't change + assertThat(updateInvocation.get(), equalTo(2)); + } + + private static final int NODE_TEST_ITERS = 100; + + public void testOnlyOneNodeRunsTemplateUpdates() { + TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, null, clusterService, null, Collections.emptyList()); + for (int i = 0; i < NODE_TEST_ITERS; i++) { + int nodesCount = randomIntBetween(1, 10); + int clientNodesCount = randomIntBetween(0, 4); + DiscoveryNodes nodes = randomNodes(nodesCount, clientNodesCount); + int updaterNode = -1; + for (int j = 0; j < nodesCount; j++) { + DiscoveryNodes localNodes = DiscoveryNodes.builder(nodes).localNodeId(nodes.resolveNode("node_" + j).getId()).build(); + if (service.shouldLocalNodeUpdateTemplates(localNodes)) { + assertThat("Expected only one node to update template, found " + updaterNode + " and " + j, updaterNode, lessThan(0)); + 
updaterNode = j; + } + } + assertThat("Expected one node to update template", updaterNode, greaterThanOrEqualTo(0)); + } + } + + public void testIfMasterHasTheHighestVersionItShouldRunsTemplateUpdates() { + for (int i = 0; i < NODE_TEST_ITERS; i++) { + int nodesCount = randomIntBetween(1, 10); + int clientNodesCount = randomIntBetween(0, 4); + DiscoveryNodes nodes = randomNodes(nodesCount, clientNodesCount); + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(nodes).localNodeId(nodes.resolveNode("_master").getId()); + nodes = builder.build(); + TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, null, clusterService, null, + Collections.emptyList()); + assertThat(service.shouldLocalNodeUpdateTemplates(nodes), + equalTo(nodes.getLargestNonClientNodeVersion().equals(nodes.getMasterNode().getVersion()))); + } + } + + public void testClientNodeDontRunTemplateUpdates() { + for (int i = 0; i < NODE_TEST_ITERS; i++) { + int nodesCount = randomIntBetween(1, 10); + int clientNodesCount = randomIntBetween(1, 4); + DiscoveryNodes nodes = randomNodes(nodesCount, clientNodesCount); + int testClient = randomIntBetween(0, clientNodesCount - 1); + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(nodes).localNodeId(nodes.resolveNode("client_" + testClient).getId()); + TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, null, clusterService, null, + Collections.emptyList()); + assertThat(service.shouldLocalNodeUpdateTemplates(builder.build()), equalTo(false)); + } + } + + private DiscoveryNodes randomNodes(int dataAndMasterNodes, int clientNodes) { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + String masterNodeId = null; + for (int i = 0; i < dataAndMasterNodes; i++) { + String id = randomAlphaOfLength(10) + "_" + i; + Set roles; + if (i == 0) { + masterNodeId = id; + // The first node has to be master node + if (randomBoolean()) { + roles = EnumSet.of(DiscoveryNode.Role.MASTER, DiscoveryNode.Role.DATA); + } else { + roles = EnumSet.of(DiscoveryNode.Role.MASTER); + } + } else { + if (randomBoolean()) { + roles = EnumSet.of(DiscoveryNode.Role.DATA); + } else { + roles = EnumSet.of(DiscoveryNode.Role.MASTER); + } + } + String node = "node_" + i; + builder.add(new DiscoveryNode(node, id, buildNewFakeTransportAddress(), emptyMap(), roles, randomVersion(random()))); + } + builder.masterNodeId(masterNodeId); // Node 0 is always a master node + + for (int i = 0; i < clientNodes; i++) { + String node = "client_" + i; + builder.add(new DiscoveryNode(node, randomAlphaOfLength(10) + "__" + i, buildNewFakeTransportAddress(), emptyMap(), + EnumSet.noneOf(DiscoveryNode.Role.class), randomVersion(random()))); + } + return builder.build(); + } + + public static MetaData randomMetaData(IndexTemplateMetaData... 
templates) { + MetaData.Builder builder = MetaData.builder(); + for (IndexTemplateMetaData template : templates) { + builder.put(template); + } + for (int i = 0; i < randomIntBetween(1, 5); i++) { + builder.put( + IndexMetaData.builder(randomAlphaOfLength(10)) + .settings(settings(Version.CURRENT)) + .numberOfReplicas(randomIntBetween(0, 3)) + .numberOfShards(randomIntBetween(1, 5)) + ); + } + return builder.build(); + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java index 3c8b540f45c..e918f2acd4f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -182,20 +182,4 @@ public class WildcardExpressionResolverTests extends ESTestCase { private IndexMetaData.Builder indexBuilder(String index) { return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); } - - public void testForDeprecatedPlusPattern() { - MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("testXXX").state(IndexMetaData.State.OPEN)) - .put(indexBuilder("testXYY").state(IndexMetaData.State.OPEN)) - .put(indexBuilder("testYYY").state(IndexMetaData.State.OPEN)); - ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); - IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); - - IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, true)); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+testX*", "-testYYY"))), equalTo(newHashSet("testXXX", "testXYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("+testYYY", "+testXY*"))), equalTo(newHashSet("testYYY", "testXYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYYY", "+testXX*"))), equalTo(newHashSet("testXXX", "testYYY"))); - assertWarnings("support for '+' as part of index expressions is deprecated"); - } - } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java index 853f0f65612..e82dbf4d0e9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java @@ -68,12 +68,7 @@ public class DelayedAllocationIT extends ESIntegTestCase { ensureGreen("test"); indexRandomData(); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard())); - assertBusy(new Runnable() { - @Override - public void run() { - assertThat(client().admin().cluster().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true)); - } - }); + assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true))); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); internalCluster().startNode(); // this will use the same data location as the stopped node ensureGreen("test"); @@ -114,12 +109,7 @@ public class DelayedAllocationIT 
extends ESIntegTestCase { ensureGreen("test"); indexRandomData(); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard())); - assertBusy(new Runnable() { - @Override - public void run() { - assertThat(client().admin().cluster().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true)); - } - }); + assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true))); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))).get()); ensureGreen("test"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index 51ddc0f3fd9..93ac2878abc 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -57,12 +57,9 @@ public class MockDiskUsagesIT extends ESIntegTestCase { List nodes = internalCluster().startNodes(3); // Wait for all 3 nodes to be up - assertBusy(new Runnable() { - @Override - public void run() { - NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().get(); - assertThat(resp.getNodes().size(), equalTo(3)); - } + assertBusy(() -> { + NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().get(); + assertThat(resp.getNodes().size(), equalTo(3)); }); // Start with all nodes at 50% usage @@ -86,13 +83,10 @@ public class MockDiskUsagesIT extends ESIntegTestCase { ensureGreen("test"); // Block until the "fake" cluster info is retrieved at least once - assertBusy(new Runnable() { - @Override - public void run() { - ClusterInfo info = cis.getClusterInfo(); - logger.info("--> got: {} nodes", info.getNodeLeastAvailableDiskUsages().size()); - assertThat(info.getNodeLeastAvailableDiskUsages().size(), greaterThan(0)); - } + assertBusy(() -> { + ClusterInfo info = cis.getClusterInfo(); + logger.info("--> got: {} nodes", info.getNodeLeastAvailableDiskUsages().size()); + assertThat(info.getNodeLeastAvailableDiskUsages().size(), greaterThan(0)); }); final List realNodeNames = new ArrayList<>(); @@ -113,21 +107,18 @@ public class MockDiskUsagesIT extends ESIntegTestCase { // Retrieve the count of shards on each node final Map nodesToShardCount = new HashMap<>(); - assertBusy(new Runnable() { - @Override - public void run() { - ClusterStateResponse resp = client().admin().cluster().prepareState().get(); - Iterator iter = resp.getState().getRoutingNodes().iterator(); - while (iter.hasNext()) { - RoutingNode node = iter.next(); - logger.info("--> node {} has {} shards", - node.nodeId(), resp.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - nodesToShardCount.put(node.nodeId(), resp.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - } - assertThat("node1 has 5 shards", nodesToShardCount.get(realNodeNames.get(0)), equalTo(5)); - assertThat("node2 has 5 shards", nodesToShardCount.get(realNodeNames.get(1)), equalTo(5)); - assertThat("node3 has 0 shards", nodesToShardCount.get(realNodeNames.get(2)), equalTo(0)); + assertBusy(() -> { + 
ClusterStateResponse resp12 = client().admin().cluster().prepareState().get(); + Iterator iter12 = resp12.getState().getRoutingNodes().iterator(); + while (iter12.hasNext()) { + RoutingNode node = iter12.next(); + logger.info("--> node {} has {} shards", + node.nodeId(), resp12.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + nodesToShardCount.put(node.nodeId(), resp12.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); } + assertThat("node1 has 5 shards", nodesToShardCount.get(realNodeNames.get(0)), equalTo(5)); + assertThat("node2 has 5 shards", nodesToShardCount.get(realNodeNames.get(1)), equalTo(5)); + assertThat("node3 has 0 shards", nodesToShardCount.get(realNodeNames.get(2)), equalTo(0)); }); // Update the disk usages so one node is now back under the high watermark @@ -138,21 +129,18 @@ public class MockDiskUsagesIT extends ESIntegTestCase { // Retrieve the count of shards on each node nodesToShardCount.clear(); - assertBusy(new Runnable() { - @Override - public void run() { - ClusterStateResponse resp = client().admin().cluster().prepareState().get(); - Iterator iter = resp.getState().getRoutingNodes().iterator(); - while (iter.hasNext()) { - RoutingNode node = iter.next(); - logger.info("--> node {} has {} shards", - node.nodeId(), resp.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - nodesToShardCount.put(node.nodeId(), resp.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); - } - assertThat("node1 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(0)), greaterThanOrEqualTo(3)); - assertThat("node2 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(1)), greaterThanOrEqualTo(3)); - assertThat("node3 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(2)), greaterThanOrEqualTo(3)); + assertBusy(() -> { + ClusterStateResponse resp1 = client().admin().cluster().prepareState().get(); + Iterator iter1 = resp1.getState().getRoutingNodes().iterator(); + while (iter1.hasNext()) { + RoutingNode node = iter1.next(); + logger.info("--> node {} has {} shards", + node.nodeId(), resp1.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); + nodesToShardCount.put(node.nodeId(), resp1.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards()); } + assertThat("node1 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(0)), greaterThanOrEqualTo(3)); + assertThat("node2 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(1)), greaterThanOrEqualTo(3)); + assertThat("node3 has at least 3 shards", nodesToShardCount.get(realNodeNames.get(2)), greaterThanOrEqualTo(3)); }); } } diff --git a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java index ff8b25c796d..d067b813d10 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java @@ -81,7 +81,7 @@ public class SimpleAllTests extends ESTestCase { Query query = new AllTermQuery(new Term("_all", "else")); TopDocs docs = searcher.search(query, 10); - assertThat(docs.totalHits, equalTo(2)); + assertThat(docs.totalHits, equalTo(2L)); assertThat(docs.scoreDocs[0].doc, equalTo(0)); assertExplanationScore(searcher, query, docs.scoreDocs[0]); assertThat(docs.scoreDocs[1].doc, equalTo(1)); @@ -89,7 +89,7 @@ public class SimpleAllTests extends ESTestCase { query = new 
AllTermQuery(new Term("_all", "something")); docs = searcher.search(query, 10); - assertThat(docs.totalHits, equalTo(2)); + assertThat(docs.totalHits, equalTo(2L)); assertThat(docs.scoreDocs[0].doc, equalTo(0)); assertExplanationScore(searcher, query, docs.scoreDocs[0]); assertThat(docs.scoreDocs[1].doc, equalTo(1)); @@ -123,7 +123,7 @@ public class SimpleAllTests extends ESTestCase { // this one is boosted. so the second doc is more relevant Query query = new AllTermQuery(new Term("_all", "else")); TopDocs docs = searcher.search(query, 10); - assertThat(docs.totalHits, equalTo(2)); + assertThat(docs.totalHits, equalTo(2L)); assertThat(docs.scoreDocs[0].doc, equalTo(1)); assertExplanationScore(searcher, query, docs.scoreDocs[0]); assertThat(docs.scoreDocs[1].doc, equalTo(0)); @@ -131,7 +131,7 @@ public class SimpleAllTests extends ESTestCase { query = new AllTermQuery(new Term("_all", "something")); docs = searcher.search(query, 10); - assertThat(docs.totalHits, equalTo(2)); + assertThat(docs.totalHits, equalTo(2L)); assertThat(docs.scoreDocs[0].doc, equalTo(0)); assertExplanationScore(searcher, query, docs.scoreDocs[0]); assertThat(docs.scoreDocs[1].doc, equalTo(1)); @@ -192,22 +192,22 @@ public class SimpleAllTests extends ESTestCase { IndexSearcher searcher = new IndexSearcher(reader); TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10); - assertThat(docs.totalHits, equalTo(2)); + assertThat(docs.totalHits, equalTo(2L)); assertThat(docs.scoreDocs[0].doc, equalTo(0)); assertThat(docs.scoreDocs[1].doc, equalTo(1)); docs = searcher.search(new AllTermQuery(new Term("_all", "koo")), 10); - assertThat(docs.totalHits, equalTo(2)); + assertThat(docs.totalHits, equalTo(2L)); assertThat(docs.scoreDocs[0].doc, equalTo(0)); assertThat(docs.scoreDocs[1].doc, equalTo(1)); docs = searcher.search(new AllTermQuery(new Term("_all", "something")), 10); - assertThat(docs.totalHits, equalTo(2)); + assertThat(docs.totalHits, equalTo(2L)); assertThat(docs.scoreDocs[0].doc, equalTo(0)); assertThat(docs.scoreDocs[1].doc, equalTo(1)); docs = searcher.search(new AllTermQuery(new Term("_all", "moo")), 10); - assertThat(docs.totalHits, equalTo(2)); + assertThat(docs.totalHits, equalTo(2L)); assertThat(docs.scoreDocs[0].doc, equalTo(0)); assertThat(docs.scoreDocs[1].doc, equalTo(1)); @@ -237,22 +237,22 @@ public class SimpleAllTests extends ESTestCase { IndexSearcher searcher = new IndexSearcher(reader); TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10); - assertThat(docs.totalHits, equalTo(2)); + assertThat(docs.totalHits, equalTo(2L)); assertThat(docs.scoreDocs[0].doc, equalTo(1)); assertThat(docs.scoreDocs[1].doc, equalTo(0)); docs = searcher.search(new AllTermQuery(new Term("_all", "koo")), 10); - assertThat(docs.totalHits, equalTo(2)); + assertThat(docs.totalHits, equalTo(2L)); assertThat(docs.scoreDocs[0].doc, equalTo(1)); assertThat(docs.scoreDocs[1].doc, equalTo(0)); docs = searcher.search(new AllTermQuery(new Term("_all", "something")), 10); - assertThat(docs.totalHits, equalTo(2)); + assertThat(docs.totalHits, equalTo(2L)); assertThat(docs.scoreDocs[0].doc, equalTo(0)); assertThat(docs.scoreDocs[1].doc, equalTo(1)); docs = searcher.search(new AllTermQuery(new Term("_all", "moo")), 10); - assertThat(docs.totalHits, equalTo(2)); + assertThat(docs.totalHits, equalTo(2L)); assertThat(docs.scoreDocs[0].doc, equalTo(0)); assertThat(docs.scoreDocs[1].doc, equalTo(1)); @@ -273,7 +273,7 @@ public class SimpleAllTests extends ESTestCase { IndexSearcher searcher 
= new IndexSearcher(reader); TopDocs docs = searcher.search(new MatchAllDocsQuery(), 10); - assertThat(docs.totalHits, equalTo(1)); + assertThat(docs.totalHits, equalTo(1L)); assertThat(docs.scoreDocs[0].doc, equalTo(0)); } } diff --git a/core/src/test/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunctionTests.java b/core/src/test/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunctionTests.java index 369129826e0..bd15805fa60 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunctionTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunctionTests.java @@ -44,7 +44,7 @@ public class ScriptScoreFunctionTests extends ESTestCase { } @Override - public boolean needsScores() { + public boolean needs_score() { return false; } }); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionLookupTests.java b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionLookupTests.java index e8b5220396d..e1ca8379972 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionLookupTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionLookupTests.java @@ -26,10 +26,10 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.FixedBitSet; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -46,23 +46,31 @@ public class VersionLookupTests extends ESTestCase { */ public void testSimple() throws Exception { Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) + // to have deleted docs + .setMergePolicy(NoMergePolicy.INSTANCE)); Document doc = new Document(); doc.add(new Field(IdFieldMapper.NAME, "6", IdFieldMapper.Defaults.FIELD_TYPE)); doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87)); writer.addDocument(doc); + writer.addDocument(new Document()); DirectoryReader reader = DirectoryReader.open(writer); LeafReaderContext segment = reader.leaves().get(0); PerThreadIDVersionAndSeqNoLookup lookup = new PerThreadIDVersionAndSeqNoLookup(segment.reader(), IdFieldMapper.NAME); // found doc - DocIdAndVersion result = lookup.lookupVersion(new BytesRef("6"), null, segment); + DocIdAndVersion result = lookup.lookupVersion(new BytesRef("6"), segment); assertNotNull(result); assertEquals(87, result.version); assertEquals(0, result.docId); // not found doc - assertNull(lookup.lookupVersion(new BytesRef("7"), null, segment)); + assertNull(lookup.lookupVersion(new BytesRef("7"), segment)); // deleted doc - assertNull(lookup.lookupVersion(new BytesRef("6"), new Bits.MatchNoBits(1), segment)); + writer.deleteDocuments(new Term(IdFieldMapper.NAME, "6")); + reader.close(); + reader = DirectoryReader.open(writer); + segment = reader.leaves().get(0); + lookup = new PerThreadIDVersionAndSeqNoLookup(segment.reader(), IdFieldMapper.NAME); + assertNull(lookup.lookupVersion(new 
BytesRef("6"), segment)); reader.close(); writer.close(); dir.close(); @@ -73,36 +81,39 @@ public class VersionLookupTests extends ESTestCase { */ public void testTwoDocuments() throws Exception { Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) + .setMergePolicy(NoMergePolicy.INSTANCE)); Document doc = new Document(); doc.add(new Field(IdFieldMapper.NAME, "6", IdFieldMapper.Defaults.FIELD_TYPE)); doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87)); writer.addDocument(doc); writer.addDocument(doc); + writer.addDocument(new Document()); DirectoryReader reader = DirectoryReader.open(writer); LeafReaderContext segment = reader.leaves().get(0); PerThreadIDVersionAndSeqNoLookup lookup = new PerThreadIDVersionAndSeqNoLookup(segment.reader(), IdFieldMapper.NAME); // return the last doc when there are duplicates - DocIdAndVersion result = lookup.lookupVersion(new BytesRef("6"), null, segment); + DocIdAndVersion result = lookup.lookupVersion(new BytesRef("6"), segment); assertNotNull(result); assertEquals(87, result.version); assertEquals(1, result.docId); // delete the first doc only - FixedBitSet live = new FixedBitSet(2); - live.set(1); - result = lookup.lookupVersion(new BytesRef("6"), live, segment); + assertTrue(writer.tryDeleteDocument(reader, 0) >= 0); + reader.close(); + reader = DirectoryReader.open(writer); + segment = reader.leaves().get(0); + lookup = new PerThreadIDVersionAndSeqNoLookup(segment.reader(), IdFieldMapper.NAME); + result = lookup.lookupVersion(new BytesRef("6"), segment); assertNotNull(result); assertEquals(87, result.version); assertEquals(1, result.docId); - // delete the second doc only - live.clear(1); - live.set(0); - result = lookup.lookupVersion(new BytesRef("6"), live, segment); - assertNotNull(result); - assertEquals(87, result.version); - assertEquals(0, result.docId); // delete both docs - assertNull(lookup.lookupVersion(new BytesRef("6"), new Bits.MatchNoBits(2), segment)); + assertTrue(writer.tryDeleteDocument(reader, 1) >= 0); + reader.close(); + reader = DirectoryReader.open(writer); + segment = reader.leaves().get(0); + lookup = new PerThreadIDVersionAndSeqNoLookup(segment.reader(), IdFieldMapper.NAME); + assertNull(lookup.lookupVersion(new BytesRef("6"), segment)); reader.close(); writer.close(); dir.close(); diff --git a/core/src/test/java/org/elasticsearch/common/settings/AddFileKeyStoreCommandTests.java b/core/src/test/java/org/elasticsearch/common/settings/AddFileKeyStoreCommandTests.java index 9044103e43b..9b592b79641 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/AddFileKeyStoreCommandTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/AddFileKeyStoreCommandTests.java @@ -37,7 +37,7 @@ public class AddFileKeyStoreCommandTests extends KeyStoreCommandTestCase { protected Command newCommand() { return new AddFileKeyStoreCommand() { @Override - protected Environment createEnv(Terminal terminal, Map settings) { + protected Environment createEnv(Terminal terminal, Map settings, Path configPath) { return env; } }; diff --git a/core/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java b/core/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java index 11c3f107fe7..20bf7421f7a 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java +++ 
b/core/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.settings; import java.io.ByteArrayInputStream; import java.io.InputStream; import java.nio.charset.StandardCharsets; +import java.nio.file.Path; import java.util.Map; import org.elasticsearch.cli.Command; @@ -39,7 +40,7 @@ public class AddStringKeyStoreCommandTests extends KeyStoreCommandTestCase { protected Command newCommand() { return new AddStringKeyStoreCommand() { @Override - protected Environment createEnv(Terminal terminal, Map settings) { + protected Environment createEnv(Terminal terminal, Map settings, Path configPath) { return env; } @Override diff --git a/core/src/test/java/org/elasticsearch/common/settings/CreateKeyStoreCommandTests.java b/core/src/test/java/org/elasticsearch/common/settings/CreateKeyStoreCommandTests.java index 5d4741c7291..da81f977e50 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/CreateKeyStoreCommandTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/CreateKeyStoreCommandTests.java @@ -34,7 +34,7 @@ public class CreateKeyStoreCommandTests extends KeyStoreCommandTestCase { protected Command newCommand() { return new CreateKeyStoreCommand() { @Override - protected Environment createEnv(Terminal terminal, Map settings) { + protected Environment createEnv(Terminal terminal, Map settings, Path configPath) { return env; } }; diff --git a/core/src/test/java/org/elasticsearch/common/settings/ListKeyStoreCommandTests.java b/core/src/test/java/org/elasticsearch/common/settings/ListKeyStoreCommandTests.java index 1a8bdfc077d..272ff5f419c 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ListKeyStoreCommandTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ListKeyStoreCommandTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.settings; +import java.nio.file.Path; import java.util.Map; import org.elasticsearch.cli.Command; @@ -35,7 +36,7 @@ public class ListKeyStoreCommandTests extends KeyStoreCommandTestCase { protected Command newCommand() { return new ListKeyStoreCommand() { @Override - protected Environment createEnv(Terminal terminal, Map settings) { + protected Environment createEnv(Terminal terminal, Map settings, Path configPath) { return env; } }; diff --git a/core/src/test/java/org/elasticsearch/common/settings/RemoveSettingKeyStoreCommandTests.java b/core/src/test/java/org/elasticsearch/common/settings/RemoveSettingKeyStoreCommandTests.java index b74382d8cf5..1ec25873a3b 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/RemoveSettingKeyStoreCommandTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/RemoveSettingKeyStoreCommandTests.java @@ -19,18 +19,16 @@ package org.elasticsearch.common.settings; -import javax.crypto.SecretKeyFactory; -import java.security.Provider; -import java.security.Security; -import java.util.Map; -import java.util.Set; - import org.elasticsearch.cli.Command; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.env.Environment; +import java.nio.file.Path; +import java.util.Map; +import java.util.Set; + import static org.hamcrest.Matchers.containsString; public class RemoveSettingKeyStoreCommandTests extends KeyStoreCommandTestCase { @@ -39,7 +37,7 @@ public class RemoveSettingKeyStoreCommandTests extends KeyStoreCommandTestCase { protected Command newCommand() { return new 
RemoveSettingKeyStoreCommand() { @Override - protected Environment createEnv(Terminal terminal, Map settings) { + protected Environment createEnv(Terminal terminal, Map settings, Path configPath) { return env; } }; diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 0bb1abb37ad..1ac94b6caa3 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -154,6 +154,22 @@ public class SettingTests extends ESTestCase { assertNull(ab2.get()); } + public void testDeprecatedSetting() { + final Setting deprecatedSetting = Setting.boolSetting("deprecated.foo.bar", false, Property.Deprecated); + final Settings settings = Settings.builder().put("deprecated.foo.bar", true).build(); + final int iterations = randomIntBetween(0, 128); + for (int i = 0; i < iterations; i++) { + deprecatedSetting.get(settings); + } + if (iterations > 0) { + /* + * This tests that we log the deprecation warning exactly one time, otherwise we would have to assert the deprecation warning + * for each usage of the setting. + */ + assertSettingDeprecationsAndWarnings(new Setting[]{deprecatedSetting}); + } + } + public void testDefault() { TimeValue defaultValue = TimeValue.timeValueMillis(randomIntBetween(0, 1000000)); Setting setting = diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java index 9fbad982bdb..72c4aca544c 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java @@ -21,6 +21,8 @@ package org.elasticsearch.common.settings; import org.elasticsearch.Version; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.loader.YamlSettingsLoader; @@ -590,6 +592,24 @@ public class SettingsTests extends ESTestCase { assertTrue(Settings.builder().setSecureSettings(secureSettings).build().isEmpty()); } + public void testWriteSettingsToStream() throws IOException { + BytesStreamOutput out = new BytesStreamOutput(); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("test.key1.foo", "somethingsecure"); + secureSettings.setString("test.key1.bar", "somethingsecure"); + secureSettings.setString("test.key2.foo", "somethingsecure"); + secureSettings.setString("test.key2.bog", "somethingsecure"); + Settings.Builder builder = Settings.builder(); + builder.put("test.key1.baz", "blah1"); + builder.setSecureSettings(secureSettings); + assertEquals(5, builder.build().size()); + Settings.writeSettingsToStream(builder.build(), out); + StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes); + Settings settings = Settings.readSettingsFromStream(in); + assertEquals(1, settings.size()); + assertEquals("blah1", settings.get("test.key1.baz")); + } + public void testSecureSettingConflict() { Setting setting = SecureSetting.secureString("something.secure", null); Settings settings = Settings.builder().put("something.secure", "notreallysecure").build(); diff --git 
a/core/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index 72db2911fc0..142123bb483 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -229,12 +229,9 @@ public class EsExecutorsTests extends ESTestCase { assertThat("wrong pool size", pool.getPoolSize(), equalTo(max)); assertThat("wrong active size", pool.getActiveCount(), equalTo(max)); barrier.await(); - assertBusy(new Runnable() { - @Override - public void run() { - assertThat("wrong active count", pool.getActiveCount(), equalTo(0)); - assertThat("idle threads didn't shrink below max. (" + pool.getPoolSize() + ")", pool.getPoolSize(), lessThan(max)); - } + assertBusy(() -> { + assertThat("wrong active count", pool.getActiveCount(), equalTo(0)); + assertThat("idle threads didn't shrink below max. (" + pool.getPoolSize() + ")", pool.getPoolSize(), lessThan(max)); }); terminate(pool); } diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java index 3ed105080b3..17b43a079dc 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java @@ -264,12 +264,7 @@ public class PrioritizedExecutorsTests extends ESTestCase { // the timeout handler is added post execution (and quickly cancelled). We have allow for this // and use assert busy - assertBusy(new Runnable() { - @Override - public void run() { - assertThat(timer.getQueue().size(), equalTo(0)); - } - }, 5, TimeUnit.SECONDS); + assertBusy(() -> assertThat(timer.getQueue().size(), equalTo(0)), 5, TimeUnit.SECONDS); assertThat(timeoutCalled.get(), equalTo(false)); assertTrue(terminate(executor)); assertTrue(terminate(threadPool)); diff --git a/core/src/test/java/org/elasticsearch/common/util/set/SetsTests.java b/core/src/test/java/org/elasticsearch/common/util/set/SetsTests.java new file mode 100644 index 00000000000..0c1869a6b40 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/util/set/SetsTests.java @@ -0,0 +1,85 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.util.set; + +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.test.ESTestCase; + +import java.util.HashSet; +import java.util.Iterator; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public class SetsTests extends ESTestCase { + + public void testDifference() { + final int endExclusive = randomIntBetween(0, 256); + final Tuple<Set<Integer>, Set<Integer>> sets = randomSets(endExclusive); + final Set<Integer> difference = Sets.difference(sets.v1(), sets.v2()); + assertDifference(endExclusive, sets, difference); + } + + public void testSortedDifference() { + final int endExclusive = randomIntBetween(0, 256); + final Tuple<Set<Integer>, Set<Integer>> sets = randomSets(endExclusive); + final Set<Integer> difference = Sets.sortedDifference(sets.v1(), sets.v2()); + assertDifference(endExclusive, sets, difference); + final Iterator<Integer> it = difference.iterator(); + if (it.hasNext()) { + int current = it.next(); + while (it.hasNext()) { + final int next = it.next(); + assertThat(next, greaterThan(current)); + current = next; + } + } + } + + /** + * Assert the difference between two sets is as expected. + * + * @param endExclusive the exclusive upper bound of the elements of either set + * @param sets a pair of sets with elements from {@code [0, endExclusive)} + * @param difference the difference between the two sets + */ + private void assertDifference( + final int endExclusive, final Tuple<Set<Integer>, Set<Integer>> sets, final Set<Integer> difference) { + for (int i = 0; i < endExclusive; i++) { + assertThat(difference.contains(i), equalTo(sets.v1().contains(i) && !sets.v2().contains(i))); + } + } + + /** + * Produces two random sets consisting of elements from {@code [0, endExclusive)}.
+ * + * @param endExclusive the exclusive upper bound of the elements of the sets + * @return a pair of sets + */ + private Tuple<Set<Integer>, Set<Integer>> randomSets(final int endExclusive) { + final Set<Integer> left = new HashSet<>(randomSubsetOf(IntStream.range(0, endExclusive).boxed().collect(Collectors.toSet()))); + final Set<Integer> right = new HashSet<>(randomSubsetOf(IntStream.range(0, endExclusive).boxed().collect(Collectors.toSet()))); + return Tuple.tuple(left, right); + } + +} diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java index 4b90016eaa4..26bf83d7d56 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java @@ -19,19 +19,25 @@ package org.elasticsearch.common.xcontent; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Base64; import java.util.List; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureFieldName; import static org.elasticsearch.common.xcontent.XContentParserUtils.parseTypedKeysObject; +import static org.hamcrest.Matchers.containsString; public class XContentParserUtilsTests extends ESTestCase { @@ -48,38 +54,113 @@ public class XContentParserUtilsTests extends ESTestCase { } } + public void testParseStoredFieldsValueString() throws IOException { + final String value = randomAlphaOfLengthBetween(0, 10); + assertParseStoredFieldsValue(value, (xcontentType, result) -> assertEquals(value, result)); + } + + public void testParseStoredFieldsValueInt() throws IOException { + final Integer value = randomInt(); + assertParseStoredFieldsValue(value, (xcontentType, result) -> assertEquals(value, result)); + } + + public void testParseStoredFieldsValueLong() throws IOException { + final Long value = randomLong(); + assertParseStoredFieldsValue(value, (xcontentType, result) -> assertEquals(value, result)); + } + + public void testParseStoredFieldsValueDouble() throws IOException { + final Double value = randomDouble(); + assertParseStoredFieldsValue(value, (xcontentType, result) -> assertEquals(value, ((Number) result).doubleValue(), 0.0d)); + } + + public void testParseStoredFieldsValueFloat() throws IOException { + final Float value = randomFloat(); + assertParseStoredFieldsValue(value, (xcontentType, result) -> assertEquals(value, ((Number) result).floatValue(), 0.0f)); + } + + public void testParseStoredFieldsValueBoolean() throws IOException { + final Boolean value = randomBoolean(); + assertParseStoredFieldsValue(value, (xcontentType, result) -> assertEquals(value, result)); + } + + public void testParseStoredFieldsValueBinary() throws IOException { + final byte[] value = randomUnicodeOfLength(scaledRandomIntBetween(10, 1000)).getBytes("UTF-8"); + assertParseStoredFieldsValue(value, (xcontentType, result) -> { + if (xcontentType == XContentType.JSON ||
xcontentType == XContentType.YAML) { + //binary values will be parsed back and returned as base64 strings when reading from json and yaml + assertArrayEquals(value, Base64.getDecoder().decode((String) result)); + } else { + //binary values will be parsed back and returned as BytesArray when reading from cbor and smile + assertArrayEquals(value, ((BytesArray) result).array()); + } + }); + } + + public void testParseStoredFieldsValueUnknown() throws IOException { + ParsingException e = expectThrows(ParsingException.class, () -> + assertParseStoredFieldsValue(null, (x, r) -> fail("Should have thrown a parsing exception"))); + assertThat(e.getMessage(), containsString("unexpected token")); + } + + private void assertParseStoredFieldsValue(final Object value, final CheckedBiConsumer consumer) + throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + final String fieldName = randomAlphaOfLengthBetween(0, 10); + + builder.startObject(); + builder.field(fieldName, value); + builder.endObject(); + + try (XContentParser parser = createParser(builder)) { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + ensureFieldName(parser, parser.nextToken(), fieldName); + assertNotNull(parser.nextToken()); + consumer.accept(xContentType, XContentParserUtils.parseStoredFieldsValue(parser)); + ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser::getTokenLocation); + assertNull(parser.nextToken()); + } + } + } + public void testParseTypedKeysObject() throws IOException { final String delimiter = randomFrom("#", ":", "/", "-", "_", "|", "_delim_"); final XContentType xContentType = randomFrom(XContentType.values()); + final ObjectParser, Void> BOOLPARSER = new ObjectParser<>("bool", () -> new SetOnce<>()); + BOOLPARSER.declareBoolean(SetOnce::set, new ParseField("field")); + final ObjectParser, Void> LONGPARSER = new ObjectParser<>("long", () -> new SetOnce<>()); + LONGPARSER.declareLong(SetOnce::set, new ParseField("field")); + List namedXContents = new ArrayList<>(); - namedXContents.add(new NamedXContentRegistry.Entry(Boolean.class, new ParseField("bool"), parser -> { - ensureExpectedToken(XContentParser.Token.VALUE_BOOLEAN, parser.nextToken(), parser::getTokenLocation); - return parser.booleanValue(); - })); - namedXContents.add(new NamedXContentRegistry.Entry(Long.class, new ParseField("long"), parser -> { - ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, parser.nextToken(), parser::getTokenLocation); - return parser.longValue(); - })); + namedXContents.add(new NamedXContentRegistry.Entry(Boolean.class, new ParseField("bool"), p -> BOOLPARSER.parse(p, null).get())); + namedXContents.add(new NamedXContentRegistry.Entry(Long.class, new ParseField("long"), p -> LONGPARSER.parse(p, null).get())); final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry(namedXContents); - BytesReference bytes = toXContent((builder, params) -> builder.field("test", 0), xContentType, randomBoolean()); + BytesReference bytes = toXContent((builder, params) -> builder.startObject("name").field("field", 0).endObject(), xContentType, + randomBoolean()); try (XContentParser parser = xContentType.xContent().createParser(namedXContentRegistry, bytes)) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, 
parser.nextToken(), parser::getTokenLocation); - - ParsingException e = expectThrows(ParsingException.class, () -> parseTypedKeysObject(parser, delimiter, Boolean.class)); - assertEquals("Cannot parse object of class [Boolean] without type information. Set [typed_keys] parameter " + - "on the request to ensure the type information is added to the response output", e.getMessage()); + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + SetOnce booleanConsumer = new SetOnce<>(); + parseTypedKeysObject(parser, delimiter, Boolean.class, booleanConsumer::set); + // because of the missing type to identify the parser, we expect no return value, but also no exception + assertNull(booleanConsumer.get()); + ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.currentToken(), parser::getTokenLocation); + ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser::getTokenLocation); + assertNull(parser.nextToken()); } - bytes = toXContent((builder, params) -> builder.field("type" + delimiter + "name", 0), xContentType, randomBoolean()); + bytes = toXContent((builder, params) -> builder.startObject("type" + delimiter + "name").field("bool", true).endObject(), + xContentType, randomBoolean()); try (XContentParser parser = xContentType.xContent().createParser(namedXContentRegistry, bytes)) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); - + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); NamedXContentRegistry.UnknownNamedObjectException e = expectThrows(NamedXContentRegistry.UnknownNamedObjectException.class, - () -> parseTypedKeysObject(parser, delimiter, Boolean.class)); + () -> parseTypedKeysObject(parser, delimiter, Boolean.class, a -> {})); assertEquals("Unknown Boolean [type]", e.getMessage()); assertEquals("type", e.getName()); assertEquals("java.lang.Boolean", e.getCategoryClass()); @@ -88,8 +169,8 @@ public class XContentParserUtilsTests extends ESTestCase { final long longValue = randomLong(); final boolean boolValue = randomBoolean(); bytes = toXContent((builder, params) -> { - builder.field("long" + delimiter + "l", longValue); - builder.field("bool" + delimiter + "b", boolValue); + builder.startObject("long" + delimiter + "l").field("field", longValue).endObject(); + builder.startObject("bool" + delimiter + "l").field("field", boolValue).endObject(); return builder; }, xContentType, randomBoolean()); @@ -97,16 +178,49 @@ public class XContentParserUtilsTests extends ESTestCase { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); - Long parsedLong = parseTypedKeysObject(parser, delimiter, Long.class); + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + SetOnce parsedLong = new SetOnce<>(); + parseTypedKeysObject(parser, delimiter, Long.class, parsedLong::set); assertNotNull(parsedLong); - assertEquals(longValue, parsedLong.longValue()); + assertEquals(longValue, parsedLong.get().longValue()); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); - Boolean parsedBoolean = parseTypedKeysObject(parser, delimiter, Boolean.class); + 
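A note on the API change exercised here: parseTypedKeysObject no longer returns the parsed value; it hands it to a consumer and silently skips entries whose field name carries no type prefix, as the test above verifies. A minimal sketch of the new calling pattern, assuming a NamedXContentRegistry entry registered under the name "long" (for example via the ObjectParser shown in this test); the helper name parseTypedLong and the key "long#my_value" are made up for illustration:

    // Hypothetical helper: reads one typed-keys entry such as {"long#my_value": {"field": 42}}.
    // The XContentParser must already be positioned on the entry's START_OBJECT token.
    private static Long parseTypedLong(XContentParser parser) throws IOException {
        SetOnce<Long> parsed = new SetOnce<>();
        parseTypedKeysObject(parser, "#", Long.class, parsed::set);
        // Without a "long#" prefix there is no parser to dispatch to, so the consumer is
        // never invoked and the result stays null; no exception is thrown.
        return parsed.get();
    }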
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + SetOnce parsedBoolean = new SetOnce<>(); + parseTypedKeysObject(parser, delimiter, Boolean.class, parsedBoolean::set); assertNotNull(parsedBoolean); - assertEquals(boolValue, parsedBoolean); + assertEquals(boolValue, parsedBoolean.get()); ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser::getTokenLocation); } } + + public void testParseTypedKeysObjectErrors() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + { + BytesReference bytes = toXContent((builder, params) -> builder.startObject("name").field("field", 0).endObject(), xContentType, + randomBoolean()); + try (XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, bytes)) { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); + ParsingException exception = expectThrows(ParsingException.class, + () -> parseTypedKeysObject(parser, "#", Boolean.class, o -> { + })); + assertEquals("Failed to parse object: unexpected token [FIELD_NAME] found", exception.getMessage()); + } + } + { + BytesReference bytes = toXContent((builder, params) -> builder.startObject("").field("field", 0).endObject(), xContentType, + randomBoolean()); + try (XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, bytes)) { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + ParsingException exception = expectThrows(ParsingException.class, + () -> parseTypedKeysObject(parser, "#", Boolean.class, o -> { + })); + assertEquals("Failed to parse object: empty key", exception.getMessage()); + } + } + } } diff --git a/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java index 5e02da294c8..0475c324f06 100644 --- a/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java +++ b/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java @@ -62,7 +62,7 @@ public class VectorHighlighterTests extends ESTestCase { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - assertThat(topDocs.totalHits, equalTo(1)); + assertThat(topDocs.totalHits, equalTo(1L)); FastVectorHighlighter highlighter = new FastVectorHighlighter(); String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))), @@ -88,7 +88,7 @@ public class VectorHighlighterTests extends ESTestCase { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - assertThat(topDocs.totalHits, equalTo(1)); + assertThat(topDocs.totalHits, equalTo(1L)); FastVectorHighlighter highlighter = new FastVectorHighlighter(); @@ -129,7 +129,7 @@ public class VectorHighlighterTests extends ESTestCase { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - assertThat(topDocs.totalHits, 
equalTo(1)); + assertThat(topDocs.totalHits, equalTo(1L)); FastVectorHighlighter highlighter = new FastVectorHighlighter(); String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))), @@ -150,7 +150,7 @@ public class VectorHighlighterTests extends ESTestCase { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - assertThat(topDocs.totalHits, equalTo(1)); + assertThat(topDocs.totalHits, equalTo(1L)); FastVectorHighlighter highlighter = new FastVectorHighlighter(); String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))), diff --git a/core/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java b/core/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java index f1b7415c679..50dfd92d82e 100644 --- a/core/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java +++ b/core/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java @@ -197,35 +197,29 @@ public abstract class AbstractDisruptionTestCase extends ESIntegTestCase { } void assertNoMaster(final String node, @Nullable final ClusterBlock expectedBlocks, TimeValue maxWaitTime) throws Exception { - assertBusy(new Runnable() { - @Override - public void run() { - ClusterState state = getNodeClusterState(node); - final DiscoveryNodes nodes = state.nodes(); - assertNull("node [" + node + "] still has [" + nodes.getMasterNode() + "] as master", nodes.getMasterNode()); - if (expectedBlocks != null) { - for (ClusterBlockLevel level : expectedBlocks.levels()) { - assertTrue("node [" + node + "] does have level [" + level + "] in it's blocks", state.getBlocks().hasGlobalBlock - (level)); - } + assertBusy(() -> { + ClusterState state = getNodeClusterState(node); + final DiscoveryNodes nodes = state.nodes(); + assertNull("node [" + node + "] still has [" + nodes.getMasterNode() + "] as master", nodes.getMasterNode()); + if (expectedBlocks != null) { + for (ClusterBlockLevel level : expectedBlocks.levels()) { + assertTrue("node [" + node + "] does have level [" + level + "] in it's blocks", state.getBlocks().hasGlobalBlock + (level)); } } }, maxWaitTime.getMillis(), TimeUnit.MILLISECONDS); } void assertDifferentMaster(final String node, final String oldMasterNode) throws Exception { - assertBusy(new Runnable() { - @Override - public void run() { - ClusterState state = getNodeClusterState(node); - String masterNode = null; - if (state.nodes().getMasterNode() != null) { - masterNode = state.nodes().getMasterNode().getName(); - } - logger.trace("[{}] master is [{}]", node, state.nodes().getMasterNode()); - assertThat("node [" + node + "] still has [" + masterNode + "] as master", - oldMasterNode, not(equalTo(masterNode))); + assertBusy(() -> { + ClusterState state = getNodeClusterState(node); + String masterNode = null; + if (state.nodes().getMasterNode() != null) { + masterNode = state.nodes().getMasterNode().getName(); } + logger.trace("[{}] master is [{}]", node, state.nodes().getMasterNode()); + assertThat("node [" + node + "] still has [" + masterNode + "] as master", + oldMasterNode, not(equalTo(masterNode))); }, 10, TimeUnit.SECONDS); } diff --git a/core/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index d1b86aeaa16..07a76b108f3 100644 --- 
a/core/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.zen.PingContextProvider; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.discovery.zen.ZenPing; @@ -41,6 +40,7 @@ import org.elasticsearch.transport.TransportService; import java.io.Closeable; import java.io.IOException; +import java.nio.file.Path; import java.util.Collections; import java.util.Stack; import java.util.concurrent.CompletableFuture; @@ -133,6 +133,11 @@ public class SingleNodeDiscoveryIT extends ESIntegTestCase { .put("transport.tcp.port", port + "-" + (port + 5 - 1)) .build(); } + + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } }; try (InternalTestCluster other = new InternalTestCluster( diff --git a/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java b/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java index 84166bb3f96..5a5f279985f 100644 --- a/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java +++ b/core/src/test/java/org/elasticsearch/document/ShardInfoIT.java @@ -128,24 +128,21 @@ public class ShardInfoIT extends ESIntegTestCase { } private void ensureActiveShardCopies(final int shardId, final int copyCount) throws Exception { - assertBusy(new Runnable() { - @Override - public void run() { - ClusterState state = client().admin().cluster().prepareState().get().getState(); - assertThat(state.routingTable().index("idx"), not(nullValue())); - assertThat(state.routingTable().index("idx").shard(shardId), not(nullValue())); - assertThat(state.routingTable().index("idx").shard(shardId).activeShards().size(), equalTo(copyCount)); + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + assertThat(state.routingTable().index("idx"), not(nullValue())); + assertThat(state.routingTable().index("idx").shard(shardId), not(nullValue())); + assertThat(state.routingTable().index("idx").shard(shardId).activeShards().size(), equalTo(copyCount)); - ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("idx") - .setWaitForNoRelocatingShards(true) - .get(); - assertThat(healthResponse.isTimedOut(), equalTo(false)); + ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("idx") + .setWaitForNoRelocatingShards(true) + .get(); + assertThat(healthResponse.isTimedOut(), equalTo(false)); - RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("idx") - .setActiveOnly(true) - .get(); - assertThat(recoveryResponse.shardRecoveryStates().get("idx").size(), equalTo(0)); - } + RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("idx") + .setActiveOnly(true) + .get(); + assertThat(recoveryResponse.shardRecoveryStates().get("idx").size(), equalTo(0)); }); } } diff --git a/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java index 083e2ad5cc0..51391a8643b 100644 --- a/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java +++ b/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java 
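The EnvironmentTests hunks that follow drop the default.path.data, default.path.logs and default.path.conf settings in favour of an explicit config path passed to the Environment constructor. A minimal sketch of the resolution rules those tests assert; the paths and class name below are hypothetical and purely illustrative:

    import java.nio.file.Path;
    import java.nio.file.Paths;

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.env.Environment;

    public class ConfigPathExample {
        public static void main(String[] args) {
            Settings settings = Settings.builder().put("path.home", "/tmp/es-home").build();

            // No explicit config path: the config dir defaults to ${path.home}/config.
            Environment byHome = new Environment(settings, null);
            assert byHome.configFile().equals(Paths.get("/tmp/es-home", "config"));

            // Explicit config path: it wins over ${path.home}/config.
            Path configPath = Paths.get("/etc/elasticsearch");
            Environment byConfigPath = new Environment(settings, configPath);
            assert byConfigPath.configFile().equals(configPath);
        }
    }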
@@ -73,28 +73,6 @@ public class EnvironmentTests extends ESTestCase { assertThat(environment.resolveRepoURL(new URL("jar:http://localhost/test/../repo1?blah!/repo/")), nullValue()); } - public void testDefaultPathData() { - final Path defaultPathData = createTempDir().toAbsolutePath(); - final Settings settings = Settings.builder() - .put("path.home", createTempDir().toAbsolutePath()) - .put("default.path.data", defaultPathData) - .build(); - final Environment environment = new Environment(settings); - assertThat(environment.dataFiles(), equalTo(new Path[] { defaultPathData })); - } - - public void testPathDataOverrideDefaultPathData() { - final Path pathData = createTempDir().toAbsolutePath(); - final Path defaultPathData = createTempDir().toAbsolutePath(); - final Settings settings = Settings.builder() - .put("path.home", createTempDir().toAbsolutePath()) - .put("path.data", pathData) - .put("default.path.data", defaultPathData) - .build(); - final Environment environment = new Environment(settings); - assertThat(environment.dataFiles(), equalTo(new Path[] { pathData })); - } - public void testPathDataWhenNotSet() { final Path pathHome = createTempDir().toAbsolutePath(); final Settings settings = Settings.builder().put("path.home", pathHome).build(); @@ -103,38 +81,10 @@ public class EnvironmentTests extends ESTestCase { } public void testPathDataNotSetInEnvironmentIfNotSet() { - final Path defaultPathData = createTempDir().toAbsolutePath(); - final Settings settings = Settings.builder() - .put("path.home", createTempDir().toAbsolutePath()) - .put("default.path.data", defaultPathData) - .build(); + final Settings settings = Settings.builder().put("path.home", createTempDir().toAbsolutePath()).build(); assertFalse(Environment.PATH_DATA_SETTING.exists(settings)); - assertTrue(Environment.DEFAULT_PATH_DATA_SETTING.exists(settings)); final Environment environment = new Environment(settings); assertFalse(Environment.PATH_DATA_SETTING.exists(environment.settings())); - assertTrue(Environment.DEFAULT_PATH_DATA_SETTING.exists(environment.settings())); - } - - public void testDefaultPathLogs() { - final Path defaultPathLogs = createTempDir().toAbsolutePath(); - final Settings settings = Settings.builder() - .put("path.home", createTempDir().toAbsolutePath()) - .put("default.path.logs", defaultPathLogs) - .build(); - final Environment environment = new Environment(settings); - assertThat(environment.logsFile(), equalTo(defaultPathLogs)); - } - - public void testPathLogsOverrideDefaultPathLogs() { - final Path pathLogs = createTempDir().toAbsolutePath(); - final Path defaultPathLogs = createTempDir().toAbsolutePath(); - final Settings settings = Settings.builder() - .put("path.home", createTempDir().toAbsolutePath()) - .put("path.logs", pathLogs) - .put("default.path.logs", defaultPathLogs) - .build(); - final Environment environment = new Environment(settings); - assertThat(environment.logsFile(), equalTo(pathLogs)); } public void testPathLogsWhenNotSet() { @@ -144,29 +94,21 @@ public class EnvironmentTests extends ESTestCase { assertThat(environment.logsFile(), equalTo(pathHome.resolve("logs"))); } - public void testDefaultPathConf() { - final Path defaultPathConf = createTempDir().toAbsolutePath(); - final Settings settings = Settings.builder() - .put("path.home", createTempDir().toAbsolutePath()) - .put("default.path.conf", defaultPathConf) - .build(); - final Environment environment = new Environment(settings); - assertThat(environment.configFile(), equalTo(defaultPathConf)); + public void 
testDefaultConfigPath() { + final Path path = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder().put("path.home", path).build(); + final Environment environment = new Environment(settings, null); + assertThat(environment.configFile(), equalTo(path.resolve("config"))); } - public void testPathConfOverrideDefaultPathConf() { - final Path pathConf = createTempDir().toAbsolutePath(); - final Path defaultPathConf = createTempDir().toAbsolutePath(); - final Settings settings = Settings.builder() - .put("path.home", createTempDir().toAbsolutePath()) - .put("path.conf", pathConf) - .put("default.path.conf", defaultPathConf) - .build(); - final Environment environment = new Environment(settings); - assertThat(environment.configFile(), equalTo(pathConf)); + public void testConfigPath() { + final Path configPath = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder().put("path.home", createTempDir().toAbsolutePath()).build(); + final Environment environment = new Environment(settings, configPath); + assertThat(environment.configFile(), equalTo(configPath)); } - public void testPathConfWhenNotSet() { + public void testConfigPathWhenNotSet() { final Path pathHome = createTempDir().toAbsolutePath(); final Settings settings = Settings.builder().put("path.home", pathHome).build(); final Environment environment = new Environment(settings); diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 4210f9c32c1..9a5201c0ea8 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.ShardId; @@ -318,7 +319,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { // clean two nodes internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build()); - assertAcked(client().admin().indices().prepareCreate("test").setSettings("index.mapping.single_type", false)); + assertAcked(client().admin().indices().prepareCreate("test")); client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet(); client().admin().indices().prepareFlush().execute().actionGet(); client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet(); @@ -350,10 +351,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet(), 3); } - logger.info("--> add some metadata, additional type and template"); - client().admin().indices().preparePutMapping("test").setType("type2") - .setSource(jsonBuilder().startObject().startObject("type2").endObject().endObject()) - .execute().actionGet(); + logger.info("--> add some metadata and additional template"); client().admin().indices().preparePutTemplate("template_1") .setTemplate("te*") .setOrder(0) @@ -381,7 +379,6 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { 
} ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); - assertThat(state.metaData().index("test").mapping("type2"), notNullValue()); assertThat(state.metaData().templates().get("template_1").patterns(), equalTo(Collections.singletonList("te*"))); assertThat(state.metaData().index("test").getAliases().get("test_alias"), notNullValue()); assertThat(state.metaData().index("test").getAliases().get("test_alias").filter(), notNullValue()); @@ -432,6 +429,10 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { } // prevent a sequence-number-based recovery from being possible + client(primaryNode).admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") + ).get(); client(primaryNode).admin().indices().prepareFlush("test").setForce(true).get(); return super.onNodeStopped(nodeName); } diff --git a/core/src/test/java/org/elasticsearch/get/GetActionIT.java b/core/src/test/java/org/elasticsearch/get/GetActionIT.java index e6439011cf6..f9c4b0d9606 100644 --- a/core/src/test/java/org/elasticsearch/get/GetActionIT.java +++ b/core/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.get; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -38,9 +39,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -58,6 +62,11 @@ import static org.hamcrest.Matchers.startsWith; public class GetActionIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + public void testSimpleGet() { assertAcked(prepareCreate("test") .addMapping("type1", "field1", "type=keyword,store=true", "field2", "type=keyword,store=true") @@ -246,15 +255,55 @@ public class GetActionIT extends ESIntegTestCase { .startObject("field").field("type", "text").field("store", true).endObject() .endObject() .endObject().endObject().string(); - String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type2") - .startObject("properties") - .startObject("field").field("type", "text").field("store", true).endObject() - .endObject() - .endObject().endObject().string(); assertAcked(prepareCreate("test") - .addMapping("type1", mapping1, XContentType.JSON) - .addMapping("type2", mapping2, XContentType.JSON) - .setSettings("index.refresh_interval", -1, "index.mapping.single_type", false)); + .addMapping("type1", mapping1, XContentType.JSON)); + ensureGreen(); + + GetResponse response = client().prepareGet("test", "type1", "1").get(); + assertThat(response.isExists(), equalTo(false)); + assertThat(response.isExists(), equalTo(false)); + + client().prepareIndex("test", "type1", "1") + 
.setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get(); + + response = client().prepareGet("test", "type1", "1").setStoredFields("field").get(); + assertThat(response.isExists(), equalTo(true)); + assertThat(response.getId(), equalTo("1")); + assertThat(response.getType(), equalTo("type1")); + Set fields = new HashSet<>(response.getFields().keySet()); + assertThat(fields, equalTo(singleton("field"))); + assertThat(response.getFields().get("field").getValues().size(), equalTo(2)); + assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1")); + assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2")); + + // Now test values being fetched from stored fields. + refresh(); + response = client().prepareGet("test", "type1", "1").setStoredFields("field").get(); + assertThat(response.isExists(), equalTo(true)); + assertThat(response.getId(), equalTo("1")); + fields = new HashSet<>(response.getFields().keySet()); + assertThat(fields, equalTo(singleton("field"))); + assertThat(response.getFields().get("field").getValues().size(), equalTo(2)); + assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1")); + assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2")); + } + + public void testGetDocWithMultivaluedFieldsMultiTypeBWC() throws Exception { + assertTrue("remove this multi type test", Version.CURRENT.before(Version.fromString("7.0.0"))); + String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties") + .startObject("field").field("type", "text").field("store", true).endObject() + .endObject() + .endObject().endObject().string(); + String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type2") + .startObject("properties") + .startObject("field").field("type", "text").field("store", true).endObject() + .endObject() + .endObject().endObject().string(); + assertAcked(prepareCreate("test") + .addMapping("type1", mapping1, XContentType.JSON) + .addMapping("type2", mapping2, XContentType.JSON) + .setSettings("index.refresh_interval", -1, "index.version.created", Version.V_5_6_0.id)); // multi types in 5.6 ensureGreen(); GetResponse response = client().prepareGet("test", "type1", "1").get(); @@ -263,10 +312,10 @@ public class GetActionIT extends ESIntegTestCase { assertThat(response.isExists(), equalTo(false)); client().prepareIndex("test", "type1", "1") - .setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get(); + .setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get(); client().prepareIndex("test", "type2", "1") - .setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get(); + .setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get(); response = client().prepareGet("test", "type1", "1").setStoredFields("field").get(); assertThat(response.isExists(), equalTo(true)); @@ -524,12 +573,47 @@ public class GetActionIT extends ESIntegTestCase { assertThat(response.getResponses()[2].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2")); } - public void testGetFieldsMetaData() throws Exception { + public void testGetFieldsMetaDataWithRouting() throws Exception { + assertAcked(prepareCreate("test") + .addMapping("doc", "field1", "type=keyword,store=true") + .addAlias(new Alias("alias")) + .setSettings("index.refresh_interval", -1, "index.version.created", 
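Several GetActionIT tests in this hunk keep multi-type scenarios alive by pinning the index to a 5.6 creation version (where multiple mapping types were still allowed) and by asserting that the test is removed before 7.0.0. A compact sketch of that pattern; the test name and mappings are hypothetical, and it relies on the InternalSettingsPlugin registered in nodePlugins() above so that index.version.created can be set from a test:

    public void testSomeMultiTypeBehaviourBWC() throws Exception {
        // Fails once development moves to 7.0.0, reminding us to delete the multi-type test.
        assertTrue("remove this multi type test", Version.CURRENT.before(Version.fromString("7.0.0")));

        // Creating the index as if on 5.6 re-enables multiple mapping types.
        assertAcked(prepareCreate("test")
                .addMapping("type1", "field", "type=keyword")
                .addMapping("type2", "field", "type=keyword")
                .setSettings("index.version.created", Version.V_5_6_0.id));
        ensureGreen();
    }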
Version.V_5_6_0.id)); // multi types in 5.6 + + client().prepareIndex("test", "doc", "1") + .setRouting("1") + .setSource(jsonBuilder().startObject().field("field1", "value").endObject()) + .get(); + + GetResponse getResponse = client().prepareGet(indexOrAlias(), "doc", "1") + .setRouting("1") + .setStoredFields("field1") + .get(); + assertThat(getResponse.isExists(), equalTo(true)); + assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false)); + assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value")); + assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true)); + assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); + + flush(); + + getResponse = client().prepareGet(indexOrAlias(), "doc", "1") + .setStoredFields("field1") + .setRouting("1") + .get(); + assertThat(getResponse.isExists(), equalTo(true)); + assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false)); + assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value")); + assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true)); + assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); + } + + public void testGetFieldsMetaDataWithParentChild() throws Exception { + assertTrue("remove this multi type test", Version.CURRENT.before(Version.fromString("7.0.0"))); assertAcked(prepareCreate("test") .addMapping("parent") .addMapping("my-type1", "_parent", "type=parent", "field1", "type=keyword,store=true") .addAlias(new Alias("alias")) - .setSettings("index.refresh_interval", -1, "index.mapping.single_type", false)); + .setSettings("index.refresh_interval", -1, "index.version.created", Version.V_5_6_0.id)); // multi types in 5.6 client().prepareIndex("test", "my-type1", "1") .setRouting("1") @@ -593,7 +677,7 @@ public class GetActionIT extends ESIntegTestCase { public void testGetFieldsComplexField() throws Exception { assertAcked(prepareCreate("my-index") - .setSettings("index.refresh_interval", -1, "index.mapping.single_type", false) + .setSettings("index.refresh_interval", -1, "index.version.created", Version.V_5_6_0.id) // multi types in 5.6 .addMapping("my-type2", jsonBuilder().startObject().startObject("my-type2").startObject("properties") .startObject("field1").field("type", "object").startObject("properties") .startObject("field2").field("type", "object").startObject("properties") @@ -725,34 +809,24 @@ public class GetActionIT extends ESIntegTestCase { String createIndexSource = "{\n" + " \"settings\": {\n" + " \"index.translog.flush_threshold_size\": \"1pb\",\n" + - " \"index.mapping.single_type\": false," + " \"refresh_interval\": \"-1\"\n" + - " },\n" + - " \"mappings\": {\n" + - " \"parentdoc\": {\n" + - " },\n" + - " \"doc\": {\n" + - " \"_parent\": {\n" + - " \"type\": \"parentdoc\"\n" + - " }\n" + - " }\n" + " }\n" + "}"; assertAcked(prepareCreate("test") .addAlias(new Alias("alias")).setSource(createIndexSource, XContentType.JSON)); ensureGreen(); - client().prepareIndex("test", "doc").setId("1").setSource("{}", XContentType.JSON).setParent("1").get(); + client().prepareIndex("test", "doc", "1").setRouting("routingValue").setId("1").setSource("{}", XContentType.JSON).get(); - String[] fieldsList = {"_parent"}; + String[] fieldsList = {"_routing"}; // before refresh - document is only in translog - assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "1"); + assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, 
"routingValue"); refresh(); //after refresh - document is in translog and also indexed - assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "1"); + assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "routingValue"); flush(); //after flush - document is in not anymore translog - only indexed - assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "1"); + assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "routingValue"); } public void testUngeneratedFieldsNotPartOfSourceStored() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java index 2708387da12..49db663ed12 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java @@ -31,12 +31,12 @@ public class HunspellTokenFilterFactoryTests extends ESTestCase { public void testDedup() throws IOException { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/conf_dir")) .put("index.analysis.filter.en_US.type", "hunspell") .put("index.analysis.filter.en_US.locale", "en_US") .build(); - TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings); + TestAnalysis analysis = + AnalysisTestsHelper.createTestAnalysisFromSettings(settings, getDataPath("/indices/analyze/conf_dir")); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("en_US"); assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class)); HunspellTokenFilterFactory hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter; @@ -44,13 +44,12 @@ public class HunspellTokenFilterFactoryTests extends ESTestCase { settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/conf_dir")) .put("index.analysis.filter.en_US.type", "hunspell") .put("index.analysis.filter.en_US.dedup", false) .put("index.analysis.filter.en_US.locale", "en_US") .build(); - analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings); + analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, getDataPath("/indices/analyze/conf_dir")); tokenFilter = analysis.tokenFilter.get("en_US"); assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class)); hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter; diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactoryTests.java new file mode 100644 index 00000000000..39b96a2cae4 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactoryTests.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; + +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.test.ESTokenStreamTestCase; +import org.elasticsearch.test.IndexSettingsModule; + +import java.io.IOException; +import java.io.StringReader; + +public class PathHierarchyTokenizerFactoryTests extends ESTokenStreamTestCase { + + public void testDefaults() throws IOException { + final Index index = new Index("test", "_na_"); + final Settings indexSettings = newAnalysisSettingsBuilder().build(); + Tokenizer tokenizer = new PathHierarchyTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, + "path-hierarchy-tokenizer", Settings.EMPTY).create(); + tokenizer.setReader(new StringReader("/one/two/three")); + assertTokenStreamContents(tokenizer, new String[] {"/one", "/one/two", "/one/two/three"}); + } + + public void testReverse() throws IOException { + final Index index = new Index("test", "_na_"); + final Settings indexSettings = newAnalysisSettingsBuilder().build(); + Settings settings = newAnalysisSettingsBuilder().put("reverse", true).build(); + Tokenizer tokenizer = new PathHierarchyTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, + "path-hierarchy-tokenizer", settings).create(); + tokenizer.setReader(new StringReader("/one/two/three")); + assertTokenStreamContents(tokenizer, new String[] {"/one/two/three", "one/two/three", "two/three", "three"}); + } + + public void testDelimiter() throws IOException { + final Index index = new Index("test", "_na_"); + final Settings indexSettings = newAnalysisSettingsBuilder().build(); + Settings settings = newAnalysisSettingsBuilder().put("delimiter", "-").build(); + Tokenizer tokenizer = new PathHierarchyTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, + "path-hierarchy-tokenizer", settings).create(); + tokenizer.setReader(new StringReader("/one/two/three")); + assertTokenStreamContents(tokenizer, new String[] {"/one/two/three"}); + tokenizer.setReader(new StringReader("one-two-three")); + assertTokenStreamContents(tokenizer, new String[] {"one", "one-two", "one-two-three"}); + } + + public void testReplace() throws IOException { + final Index index = new Index("test", "_na_"); + final Settings indexSettings = newAnalysisSettingsBuilder().build(); + Settings settings = newAnalysisSettingsBuilder().put("replacement", "-").build(); + Tokenizer tokenizer = new PathHierarchyTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, + "path-hierarchy-tokenizer", settings).create(); + tokenizer.setReader(new StringReader("/one/two/three")); + assertTokenStreamContents(tokenizer, new String[] {"-one", "-one-two", "-one-two-three"}); + tokenizer.setReader(new StringReader("one-two-three")); + assertTokenStreamContents(tokenizer, new String[] {"one-two-three"}); + } + + public void testSkip() throws IOException { + final Index index = new 
Index("test", "_na_"); + final Settings indexSettings = newAnalysisSettingsBuilder().build(); + Settings settings = newAnalysisSettingsBuilder().put("skip", 2).build(); + Tokenizer tokenizer = new PathHierarchyTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, + "path-hierarchy-tokenizer", settings).create(); + tokenizer.setReader(new StringReader("/one/two/three/four/five")); + assertTokenStreamContents(tokenizer, new String[] {"/three", "/three/four", "/three/four/five"}); + } + + public void testDelimiterExceptions() { + final Index index = new Index("test", "_na_"); + final Settings indexSettings = newAnalysisSettingsBuilder().build(); + { + String delimiter = RandomPicks.randomFrom(random(), new String[] {"--", ""}); + Settings settings = newAnalysisSettingsBuilder().put("delimiter", delimiter).build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new PathHierarchyTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, + "path-hierarchy-tokenizer", settings).create()); + assertEquals("delimiter must be a one char value", e.getMessage()); + } + { + String replacement = RandomPicks.randomFrom(random(), new String[] {"--", ""}); + Settings settings = newAnalysisSettingsBuilder().put("replacement", replacement).build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new PathHierarchyTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, + "path-hierarchy-tokenizer", settings).create()); + assertEquals("replacement must be a one char value", e.getMessage()); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java index c4842e497ef..b5640cdd120 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java @@ -23,6 +23,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.queryparser.classic.ParseException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.logging.Loggers; @@ -41,6 +42,8 @@ import java.nio.file.Files; import java.nio.file.Path; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.startsWith; public class SynonymsAnalysisTests extends ESTestCase { protected final Logger logger = Loggers.getLogger(getClass()); @@ -69,8 +72,57 @@ public class SynonymsAnalysisTests extends ESTestCase { match("synonymAnalyzerWordnet", "abstain", "abstain refrain desist"); match("synonymAnalyzerWordnet_file", "abstain", "abstain refrain desist"); match("synonymAnalyzerWithsettings", "kimchy", "sha hay"); + match("synonymAnalyzerWithStopAfterSynonym", "kimchy is the dude abides , stop", "shay is the elasticsearch man! ,"); + match("synonymAnalyzerWithStopBeforeSynonym", "kimchy is the dude abides , stop", "shay is the elasticsearch man! 
,"); + match("synonymAnalyzerWithStopSynonymAfterSynonym", "kimchy is the dude abides", "shay is the man!"); + match("synonymAnalyzerExpand", "kimchy is the dude abides", "kimchy shay is the dude elasticsearch abides man!"); + match("synonymAnalyzerExpandWithStopAfterSynonym", "kimchy is the dude abides", "shay is the dude abides man!"); + } + public void testSynonymWordDeleteByAnalyzer() throws IOException { + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("path.home", createTempDir().toString()) + .put("index.analysis.filter.synonym.type", "synonym") + .putArray("index.analysis.filter.synonym.synonyms", "kimchy => shay", "dude => elasticsearch", "abides => man!") + .put("index.analysis.filter.stop_within_synonym.type", "stop") + .putArray("index.analysis.filter.stop_within_synonym.stopwords", "kimchy", "elasticsearch") + .put("index.analysis.analyzer.synonymAnalyzerWithStopSynonymBeforeSynonym.tokenizer", "whitespace") + .putArray("index.analysis.analyzer.synonymAnalyzerWithStopSynonymBeforeSynonym.filter", "stop_within_synonym","synonym") + .put().build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try { + indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers; + fail("fail! due to synonym word deleted by analyzer"); + } catch (Exception e) { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), startsWith("failed to build synonyms")); + } + } + + public void testExpandSynonymWordDeleteByAnalyzer() throws IOException { + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("path.home", createTempDir().toString()) + .put("index.analysis.filter.synonym_expand.type", "synonym") + .putArray("index.analysis.filter.synonym_expand.synonyms", "kimchy, shay", "dude, elasticsearch", "abides, man!") + .put("index.analysis.filter.stop_within_synonym.type", "stop") + .putArray("index.analysis.filter.stop_within_synonym.stopwords", "kimchy", "elasticsearch") + .put("index.analysis.analyzer.synonymAnalyzerExpandWithStopBeforeSynonym.tokenizer", "whitespace") + .putArray("index.analysis.analyzer.synonymAnalyzerExpandWithStopBeforeSynonym.filter", "stop_within_synonym","synonym_expand") + .put().build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try { + indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers; + fail("fail! 
due to synonym word deleted by analyzer"); + } catch (Exception e) { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), startsWith("failed to build synonyms")); + } + } + + private void match(String analyzerName, String source, String target) throws IOException { Analyzer analyzer = indexAnalyzers.get(analyzerName).analyzer(); diff --git a/core/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/core/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index d21273a7b03..d1eef05c2ef 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -30,6 +30,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import static org.elasticsearch.index.translog.TranslogDeletionPolicyTests.createTranslogDeletionPolicy; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -39,7 +40,7 @@ public class CombinedDeletionPolicyTests extends ESTestCase { public void testPassThrough() throws IOException { SnapshotDeletionPolicy indexDeletionPolicy = mock(SnapshotDeletionPolicy.class); - CombinedDeletionPolicy combinedDeletionPolicy = new CombinedDeletionPolicy(indexDeletionPolicy, new TranslogDeletionPolicy(), + CombinedDeletionPolicy combinedDeletionPolicy = new CombinedDeletionPolicy(indexDeletionPolicy, createTranslogDeletionPolicy(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG); List commitList = new ArrayList<>(); long count = randomIntBetween(1, 3); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 16e746a67f7..e9c89166348 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -89,6 +89,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -117,16 +118,15 @@ import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.IndexSearcherWrapper; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardUtils; -import org.elasticsearch.index.shard.TranslogOpToEngineOpConverter; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.DirectoryUtils; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; -import org.elasticsearch.index.translog.TranslogDeletionPolicy; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.test.DummyShardLock; @@ -162,6 +162,7 @@ import java.util.Set; import 
java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -180,6 +181,8 @@ import static org.elasticsearch.index.engine.Engine.Operation.Origin.LOCAL_TRANS import static org.elasticsearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; +import static org.elasticsearch.index.mapper.SourceToParse.source; +import static org.elasticsearch.index.translog.TranslogDeletionPolicyTests.createTranslogDeletionPolicy; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -336,7 +339,7 @@ public class InternalEngineTests extends ESTestCase { protected Translog createTranslog(Path translogPath) throws IOException { TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); - return new Translog(translogConfig, null, new TranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + return new Translog(translogConfig, null, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); } protected InternalEngine createEngine(Store store, Path translogPath) throws IOException { @@ -863,14 +866,14 @@ public class InternalEngineTests extends ESTestCase { recoveringEngine = new InternalEngine(copy(initialEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)) { @Override public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException { - assertThat(getTranslog().totalOperations(), equalTo(docs)); + assertThat(getTranslog().uncommittedOperations(), equalTo(docs)); final CommitId commitId = super.flush(force, waitIfOngoing); flushed.set(true); return commitId; } }; - assertThat(recoveringEngine.getTranslog().totalOperations(), equalTo(docs)); + assertThat(recoveringEngine.getTranslog().uncommittedOperations(), equalTo(docs)); recoveringEngine.recoverFromTranslog(); assertTrue(flushed.get()); } finally { @@ -1960,7 +1963,7 @@ public class InternalEngineTests extends ESTestCase { final String formattedMessage = event.getMessage().getFormattedMessage(); if (event.getLevel() == Level.TRACE && event.getMarker().getName().contains("[index][0] ")) { if (event.getLoggerName().endsWith(".IW") && - formattedMessage.contains("IW: apply all deletes during flush")) { + formattedMessage.contains("IW: now apply all deletes")) { sawIndexWriterMessage = true; } if (event.getLoggerName().endsWith(".IFD")) { @@ -2019,7 +2022,10 @@ public class InternalEngineTests extends ESTestCase { initialEngine = engine; initialEngine .seqNoService() - .updateAllocationIdsFromMaster(new HashSet<>(Arrays.asList("primary", "replica")), Collections.emptySet()); + .updateAllocationIdsFromMaster( + randomNonNegativeLong(), + new HashSet<>(Arrays.asList("primary", "replica")), + Collections.emptySet()); for (int op = 0; op < opCount; op++) { final String id; // mostly index, sometimes delete @@ -2133,6 +2139,7 @@ public class InternalEngineTests extends ESTestCase { final int numDocsPerThread = randomIntBetween(500, 1000); final CyclicBarrier barrier = new CyclicBarrier(numIndexingThreads + 1); final List 
indexingThreads = new ArrayList<>(); + final CountDownLatch doneLatch = new CountDownLatch(numIndexingThreads); // create N indexing threads to index documents simultaneously for (int threadNum = 0; threadNum < numIndexingThreads; threadNum++) { final int threadIdx = threadNum; @@ -2147,7 +2154,10 @@ public class InternalEngineTests extends ESTestCase { } } catch (Exception e) { throw new RuntimeException(e); + } finally { + doneLatch.countDown(); } + }); indexingThreads.add(indexingThread); } @@ -2157,12 +2167,19 @@ public class InternalEngineTests extends ESTestCase { thread.start(); } barrier.await(); // wait for indexing threads to all be ready to start - + int commitLimit = randomIntBetween(10, 20); + long sleepTime = 1; // create random commit points boolean doneIndexing; do { - doneIndexing = indexingThreads.stream().filter(Thread::isAlive).count() == 0; + doneIndexing = doneLatch.await(sleepTime, TimeUnit.MILLISECONDS); commits.add(engine.acquireIndexCommit(true)); + if (commits.size() > commitLimit) { // don't keep on piling up too many commits + IOUtils.close(commits.remove(randomIntBetween(0, commits.size()-1))); + // we increase the wait time to make sure we eventually if things are slow wait for threads to finish. + // this will reduce pressure on disks and will allow threads to make progress without piling up too many commits + sleepTime = sleepTime * 2; + } } while (doneIndexing == false); // now, verify all the commits have the correct docs according to the user commit data @@ -2488,10 +2505,19 @@ public class InternalEngineTests extends ESTestCase { } public void testTranslogCleanUpPostCommitCrash() throws Exception { + IndexSettings indexSettings = new IndexSettings(defaultSettings.getIndexMetaData(), defaultSettings.getNodeSettings(), + defaultSettings.getScopedSettings()); + IndexMetaData.Builder builder = IndexMetaData.builder(indexSettings.getIndexMetaData()); + builder.settings(Settings.builder().put(indexSettings.getSettings()) + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") + ); + indexSettings.updateIndexMetaData(builder.build()); + try (Store store = createStore()) { AtomicBoolean throwErrorOnCommit = new AtomicBoolean(); final Path translogPath = createTempDir(); - try (InternalEngine engine = new InternalEngine(config(defaultSettings, store, translogPath, newMergePolicy(), null, null)) { + try (InternalEngine engine = new InternalEngine(config(indexSettings, store, translogPath, newMergePolicy(), null, null)) { @Override protected void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException { super.commitIndexWriter(writer, translog, syncId); @@ -2506,7 +2532,7 @@ public class InternalEngineTests extends ESTestCase { FlushFailedEngineException e = expectThrows(FlushFailedEngineException.class, engine::flush); assertThat(e.getCause().getMessage(), equalTo("power's out")); } - try (InternalEngine engine = new InternalEngine(config(defaultSettings, store, translogPath, newMergePolicy(), null, null))) { + try (InternalEngine engine = new InternalEngine(config(indexSettings, store, translogPath, newMergePolicy(), null, null))) { engine.recoverFromTranslog(); assertVisibleCount(engine, 1); final long committedGen = Long.valueOf( @@ -2532,7 +2558,7 @@ public class InternalEngineTests extends ESTestCase { engine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG)); try (Engine.Searcher searcher 
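An aside on the commit-acquisition loop above: it now waits on a CountDownLatch with an exponentially growing timeout and trims the list of held commits, so slow disks do not end up with an unbounded pile of open index commits. A generic, JDK-only sketch of that drain-with-backoff idea; doneLatch, acquireResource and limit are illustrative placeholders:

    List<Closeable> held = new ArrayList<>();
    long sleepTime = 1;
    boolean done;
    do {
        done = doneLatch.await(sleepTime, TimeUnit.MILLISECONDS);
        held.add(acquireResource());      // the test acquires an index commit here
        if (held.size() > limit) {        // never hold too many resources open
            held.remove(0).close();
            sleepTime *= 2;               // back off so the worker threads can make progress
        }
    } while (done == false);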
= engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10)); - assertThat(topDocs.totalHits, equalTo(0)); + assertThat(topDocs.totalHits, equalTo(0L)); } } @@ -2623,7 +2649,7 @@ public class InternalEngineTests extends ESTestCase { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + numExtraDocs)); - assertThat(topDocs.totalHits, equalTo(numDocs + numExtraDocs)); + assertThat(topDocs.totalHits, equalTo((long) numDocs + numExtraDocs)); } } IOUtils.close(store, directory); @@ -2691,14 +2717,14 @@ public class InternalEngineTests extends ESTestCase { assertThat(result.getVersion(), equalTo(2L)); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1); - assertThat(topDocs.totalHits, equalTo(numDocs + 1)); + assertThat(topDocs.totalHits, equalTo(numDocs + 1L)); } engine.close(); engine = createEngine(store, primaryTranslogDir); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1); - assertThat(topDocs.totalHits, equalTo(numDocs + 1)); + assertThat(topDocs.totalHits, equalTo(numDocs + 1L)); } parser = (TranslogHandler) engine.config().getTranslogRecoveryRunner(); assertEquals(flush ? 1 : 2, parser.appliedOperations.get()); @@ -2711,12 +2737,11 @@ public class InternalEngineTests extends ESTestCase { } try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs); - assertThat(topDocs.totalHits, equalTo(numDocs)); + assertThat(topDocs.totalHits, equalTo((long) numDocs)); } } - public static class TranslogHandler extends TranslogOpToEngineOpConverter - implements EngineConfig.TranslogRecoveryRunner { + public static class TranslogHandler implements EngineConfig.TranslogRecoveryRunner { private final MapperService mapperService; public Mapping mappingUpdate = null; @@ -2724,7 +2749,6 @@ public class InternalEngineTests extends ESTestCase { private final AtomicLong appliedOperations = new AtomicLong(); public TranslogHandler(NamedXContentRegistry xContentRegistry, IndexSettings indexSettings) { - super(new ShardId("test", "_na_", 0), null); NamedAnalyzer defaultAnalyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer()); IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultAnalyzer, defaultAnalyzer, Collections.emptyMap(), Collections.emptyMap()); SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap()); @@ -2733,8 +2757,7 @@ public class InternalEngineTests extends ESTestCase { () -> null); } - @Override - protected DocumentMapperForType docMapper(String type) { + private DocumentMapperForType docMapper(String type) { RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder(type); DocumentMapper.Builder b = new DocumentMapper.Builder(rootBuilder, mapperService); return new DocumentMapperForType(b.build(mapperService), mappingUpdate); @@ -2779,6 +2802,33 @@ public class InternalEngineTests extends ESTestCase { } return opsRecovered; } + + private Engine.Operation convertToEngineOp(Translog.Operation operation, Engine.Operation.Origin origin) { + switch (operation.opType()) { + case INDEX: + 
final Translog.Index index = (Translog.Index) operation; + final String indexName = mapperService.index().getName(); + final Engine.Index engineIndex = IndexShard.prepareIndex(docMapper(index.type()), + source(indexName, index.type(), index.id(), index.source(), XContentFactory.xContentType(index.source())) + .routing(index.routing()).parent(index.parent()), index.seqNo(), index.primaryTerm(), + index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin, + index.getAutoGeneratedIdTimestamp(), true); + return engineIndex; + case DELETE: + final Translog.Delete delete = (Translog.Delete) operation; + final Engine.Delete engineDelete = new Engine.Delete(delete.type(), delete.id(), delete.uid(), delete.seqNo(), + delete.primaryTerm(), delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(), + origin, System.nanoTime()); + return engineDelete; + case NO_OP: + final Translog.NoOp noOp = (Translog.NoOp) operation; + final Engine.NoOp engineNoOp = + new Engine.NoOp(noOp.seqNo(), noOp.primaryTerm(), origin, System.nanoTime(), noOp.reason()); + return engineNoOp; + default: + throw new IllegalStateException("No operation defined for [" + operation + "]"); + } + } } public void testRecoverFromForeignTranslog() throws IOException { @@ -2795,7 +2845,7 @@ public class InternalEngineTests extends ESTestCase { Translog translog = new Translog( new TranslogConfig(shardId, createTempDir(), INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), - null, new TranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + null, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); translog.add(new Translog.Index("test", "SomeBogusId", 0, "{}".getBytes(Charset.forName("UTF-8")))); assertEquals(generation.translogFileGeneration, translog.currentFileGeneration()); translog.close(); @@ -2906,7 +2956,7 @@ public class InternalEngineTests extends ESTestCase { assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); assertEquals(1, engine.getTranslog().currentFileGeneration()); - assertEquals(0L, engine.getTranslog().totalOperations()); + assertEquals(0L, engine.getTranslog().uncommittedOperations()); } } @@ -3823,7 +3873,7 @@ public class InternalEngineTests extends ESTestCase { System.nanoTime(), reason)); assertThat(noOpEngine.seqNoService().getLocalCheckpoint(), equalTo((long) (maxSeqNo + 1))); - assertThat(noOpEngine.getTranslog().totalOperations(), equalTo(1 + gapsFilled)); + assertThat(noOpEngine.getTranslog().uncommittedOperations(), equalTo(1 + gapsFilled)); // skip to the op that we added to the translog Translog.Operation op; Translog.Operation last = null; @@ -3969,7 +4019,7 @@ public class InternalEngineTests extends ESTestCase { assertEquals(maxSeqIDOnReplica, replicaEngine.seqNoService().getMaxSeqNo()); assertEquals(checkpointOnReplica, replicaEngine.seqNoService().getLocalCheckpoint()); recoveringEngine = new InternalEngine(copy(replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); - assertEquals(numDocsOnReplica, recoveringEngine.getTranslog().totalOperations()); + assertEquals(numDocsOnReplica, recoveringEngine.getTranslog().uncommittedOperations()); recoveringEngine.recoverFromTranslog(); assertEquals(maxSeqIDOnReplica, recoveringEngine.seqNoService().getMaxSeqNo()); assertEquals(checkpointOnReplica, recoveringEngine.seqNoService().getLocalCheckpoint()); @@ -4000,7 
+4050,7 @@ public class InternalEngineTests extends ESTestCase { try { recoveringEngine = new InternalEngine(copy(replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); if (flushed) { - assertEquals(0, recoveringEngine.getTranslog().totalOperations()); + assertEquals(0, recoveringEngine.getTranslog().uncommittedOperations()); } recoveringEngine.recoverFromTranslog(); assertEquals(maxSeqIDOnReplica, recoveringEngine.seqNoService().getMaxSeqNo()); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java index c22114e28aa..df6328feabc 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java @@ -114,7 +114,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes SortField sortField = indexFieldData.sortField(null, MultiValueMode.MIN, null, false); topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); - assertThat(topDocs.totalHits, equalTo(3)); + assertThat(topDocs.totalHits, equalTo(3L)); assertThat(topDocs.scoreDocs[0].doc, equalTo(1)); assertThat(toString(((FieldDoc) topDocs.scoreDocs[0]).fields[0]), equalTo(one())); assertThat(topDocs.scoreDocs[1].doc, equalTo(0)); @@ -125,7 +125,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes sortField = indexFieldData.sortField(null, MultiValueMode.MAX, null, true); topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); - assertThat(topDocs.totalHits, equalTo(3)); + assertThat(topDocs.totalHits, equalTo(3L)); assertThat(topDocs.scoreDocs[0].doc, equalTo(2)); assertThat(topDocs.scoreDocs[1].doc, equalTo(0)); assertThat(topDocs.scoreDocs[2].doc, equalTo(1)); @@ -191,7 +191,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); SortField sortField = indexFieldData.sortField(null, MultiValueMode.MIN, null, false); TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); - assertThat(topDocs.totalHits, equalTo(3)); + assertThat(topDocs.totalHits, equalTo(3L)); assertThat(topDocs.scoreDocs.length, equalTo(3)); assertThat(topDocs.scoreDocs[0].doc, equalTo(1)); assertThat(topDocs.scoreDocs[1].doc, equalTo(0)); @@ -199,7 +199,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes ; sortField = indexFieldData.sortField(null, MultiValueMode.MAX, null, true); topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); - assertThat(topDocs.totalHits, equalTo(3)); + assertThat(topDocs.totalHits, equalTo(3L)); assertThat(topDocs.scoreDocs.length, equalTo(3)); assertThat(topDocs.scoreDocs[0].doc, equalTo(0)); assertThat(topDocs.scoreDocs[1].doc, equalTo(2)); @@ -258,7 +258,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes indexFieldData.sortField(null, MultiValueMode.MIN, null, false); TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); - assertThat(topDocs.totalHits, equalTo(8)); + assertThat(topDocs.totalHits, equalTo(8L)); assertThat(topDocs.scoreDocs.length, equalTo(8)); assertThat(topDocs.scoreDocs[0].doc, equalTo(7)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), 
equalTo("!08")); @@ -280,7 +280,7 @@ public abstract class AbstractFieldDataImplTestCase extends AbstractFieldDataTes sortField = indexFieldData.sortField(null, MultiValueMode.MAX, null, true); topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); - assertThat(topDocs.totalHits, equalTo(8)); + assertThat(topDocs.totalHits, equalTo(8L)); assertThat(topDocs.scoreDocs.length, equalTo(8)); assertThat(topDocs.scoreDocs[0].doc, equalTo(6)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("10")); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index 99aa0816082..b9e3a0813b2 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -134,7 +134,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase { @Before public void setup() throws Exception { - indexService = createIndex("test", Settings.builder().put("mapping.single_type", false).build()); + indexService = createIndex("test", Settings.builder().build()); mapperService = indexService.mapperService(); indicesFieldDataCache = getInstanceFromNode(IndicesService.class).getIndicesFieldDataCache(); ifdService = indexService.fieldData(); diff --git a/core/src/test/java/org/elasticsearch/index/get/GetFieldTests.java b/core/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java similarity index 61% rename from core/src/test/java/org/elasticsearch/index/get/GetFieldTests.java rename to core/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java index 62cd45508d8..0b8549e0053 100644 --- a/core/src/test/java/org/elasticsearch/index/get/GetFieldTests.java +++ b/core/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.get; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -41,62 +42,63 @@ import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; -public class GetFieldTests extends ESTestCase { +public class DocumentFieldTests extends ESTestCase { public void testToXContent() { - GetField getField = new GetField("field", Arrays.asList("value1", "value2")); - String output = Strings.toString(getField); + DocumentField documentField = new DocumentField("field", Arrays.asList("value1", "value2")); + String output = Strings.toString(documentField); assertEquals("{\"field\":[\"value1\",\"value2\"]}", output); } public void testEqualsAndHashcode() { - checkEqualsAndHashCode(randomGetField(XContentType.JSON).v1(), GetFieldTests::copyGetField, GetFieldTests::mutateGetField); + checkEqualsAndHashCode(randomDocumentField(XContentType.JSON).v1(), DocumentFieldTests::copyDocumentField, + DocumentFieldTests::mutateDocumentField); } public void testToAndFromXContent() throws Exception { XContentType xContentType = 
randomFrom(XContentType.values()); - Tuple tuple = randomGetField(xContentType); - GetField getField = tuple.v1(); - GetField expectedGetField = tuple.v2(); + Tuple tuple = randomDocumentField(xContentType); + DocumentField documentField = tuple.v1(); + DocumentField expectedDocumentField = tuple.v2(); boolean humanReadable = randomBoolean(); - BytesReference originalBytes = toShuffledXContent(getField, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference originalBytes = toShuffledXContent(documentField, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); //test that we can parse what we print out - GetField parsedGetField; + DocumentField parsedDocumentField; try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { //we need to move to the next token, the start object one that we manually added is not expected assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); - parsedGetField = GetField.fromXContent(parser); + parsedDocumentField = DocumentField.fromXContent(parser); assertEquals(XContentParser.Token.END_ARRAY, parser.currentToken()); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); assertNull(parser.nextToken()); } - assertEquals(expectedGetField, parsedGetField); - BytesReference finalBytes = toXContent(parsedGetField, xContentType, humanReadable); + assertEquals(expectedDocumentField, parsedDocumentField); + BytesReference finalBytes = toXContent(parsedDocumentField, xContentType, humanReadable); assertToXContentEquivalent(originalBytes, finalBytes, xContentType); } - private static GetField copyGetField(GetField getField) { - return new GetField(getField.getName(), getField.getValues()); + private static DocumentField copyDocumentField(DocumentField documentField) { + return new DocumentField(documentField.getName(), documentField.getValues()); } - private static GetField mutateGetField(GetField getField) { - List> mutations = new ArrayList<>(); - mutations.add(() -> new GetField(randomUnicodeOfCodepointLength(15), getField.getValues())); - mutations.add(() -> new GetField(getField.getName(), randomGetField(XContentType.JSON).v1().getValues())); + private static DocumentField mutateDocumentField(DocumentField documentField) { + List> mutations = new ArrayList<>(); + mutations.add(() -> new DocumentField(randomUnicodeOfCodepointLength(15), documentField.getValues())); + mutations.add(() -> new DocumentField(documentField.getName(), randomDocumentField(XContentType.JSON).v1().getValues())); return randomFrom(mutations).get(); } - public static Tuple randomGetField(XContentType xContentType) { + public static Tuple randomDocumentField(XContentType xContentType) { if (randomBoolean()) { String fieldName = randomFrom(ParentFieldMapper.NAME, RoutingFieldMapper.NAME, UidFieldMapper.NAME); - GetField getField = new GetField(fieldName, Collections.singletonList(randomAlphaOfLengthBetween(3, 10))); - return Tuple.tuple(getField, getField); + DocumentField documentField = new DocumentField(fieldName, Collections.singletonList(randomAlphaOfLengthBetween(3, 10))); + return Tuple.tuple(documentField, documentField); } String fieldName = randomAlphaOfLengthBetween(3, 10); Tuple, List> tuple = RandomObjects.randomStoredFieldValues(random(), xContentType); - GetField input = new GetField(fieldName, tuple.v1()); - GetField expected = new GetField(fieldName, tuple.v2()); + DocumentField input = new DocumentField(fieldName, tuple.v1()); + DocumentField 
expected = new DocumentField(fieldName, tuple.v2()); return Tuple.tuple(input, expected); } } diff --git a/core/src/test/java/org/elasticsearch/index/get/GetResultTests.java b/core/src/test/java/org/elasticsearch/index/get/GetResultTests.java index c23a648aff9..a38d183299c 100644 --- a/core/src/test/java/org/elasticsearch/index/get/GetResultTests.java +++ b/core/src/test/java/org/elasticsearch/index/get/GetResultTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; @@ -42,7 +43,7 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.index.get.GetFieldTests.randomGetField; +import static org.elasticsearch.index.get.DocumentFieldTests.randomDocumentField; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; @@ -72,7 +73,7 @@ public class GetResultTests extends ESTestCase { public void testToXContent() throws IOException { { GetResult getResult = new GetResult("index", "type", "id", 1, true, new BytesArray("{ \"field1\" : " + - "\"value1\", \"field2\":\"value2\"}"), singletonMap("field1", new GetField("field1", + "\"value1\", \"field2\":\"value2\"}"), singletonMap("field1", new DocumentField("field1", singletonList("value1")))); String output = Strings.toString(getResult); assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"found\":true,\"_source\":{ \"field1\" " + @@ -115,9 +116,9 @@ public class GetResultTests extends ESTestCase { } public void testToXContentEmbedded() throws IOException { - Map fields = new HashMap<>(); - fields.put("foo", new GetField("foo", singletonList("bar"))); - fields.put("baz", new GetField("baz", Arrays.asList("baz_0", "baz_1"))); + Map fields = new HashMap<>(); + fields.put("foo", new DocumentField("foo", singletonList("bar"))); + fields.put("baz", new DocumentField("baz", Arrays.asList("baz_0", "baz_1"))); GetResult getResult = new GetResult("index", "type", "id", 2, true, new BytesArray("{\"foo\":\"bar\",\"baz\":[\"baz_0\",\"baz_1\"]}"), fields); @@ -169,7 +170,7 @@ public class GetResultTests extends ESTestCase { mutations.add(() -> new GetResult(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getVersion(), getResult.isExists(), RandomObjects.randomSource(random()), getResult.getFields())); mutations.add(() -> new GetResult(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getVersion(), - getResult.isExists(), getResult.internalSourceRef(), randomGetFields(XContentType.JSON).v1())); + getResult.isExists(), getResult.internalSourceRef(), randomDocumentFields(XContentType.JSON).v1())); return randomFrom(mutations).get(); } @@ -180,8 +181,8 @@ public class GetResultTests extends ESTestCase { final long version; final boolean exists; BytesReference source = null; - Map fields = null; - Map expectedFields = null; + Map fields = 
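The renamed DocumentFieldTests keeps the usual copy/mutate structure for checkEqualsAndHashCode: a copy must stay equal (and hash-equal) to the original, while any single mutation must break equality. A tiny, generic sketch of that contract check, using a made-up Point value class instead of DocumentField:

import java.util.Objects;

public class EqualsHashCodeSketch {
    static final class Point {
        final int x, y;
        Point(int x, int y) { this.x = x; this.y = y; }
        @Override public boolean equals(Object o) {
            return o instanceof Point && ((Point) o).x == x && ((Point) o).y == y;
        }
        @Override public int hashCode() { return Objects.hash(x, y); }
    }

    static Point copy(Point p)   { return new Point(p.x, p.y); }     // equal copy
    static Point mutate(Point p) { return new Point(p.x + 1, p.y); } // breaks equality

    public static void main(String[] args) {
        Point original = new Point(3, 4);
        Point copied = copy(original);
        Point mutated = mutate(original);
        if (!original.equals(copied) || original.hashCode() != copied.hashCode()) {
            throw new AssertionError("a copy must be equal and hash-equal");
        }
        if (original.equals(mutated)) {
            throw new AssertionError("a mutation must break equality");
        }
        System.out.println("equals/hashCode contract holds");
    }
}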
null; + Map expectedFields = null; if (frequently()) { version = randomNonNegativeLong(); exists = true; @@ -189,7 +190,7 @@ public class GetResultTests extends ESTestCase { source = RandomObjects.randomSource(random()); } if (randomBoolean()) { - Tuple, Map> tuple = randomGetFields(xContentType); + Tuple, Map> tuple = randomDocumentFields(xContentType); fields = tuple.v1(); expectedFields = tuple.v2(); } @@ -202,14 +203,14 @@ public class GetResultTests extends ESTestCase { return Tuple.tuple(getResult, expectedGetResult); } - private static Tuple,Map> randomGetFields(XContentType xContentType) { + private static Tuple,Map> randomDocumentFields(XContentType xContentType) { int numFields = randomIntBetween(2, 10); - Map fields = new HashMap<>(numFields); - Map expectedFields = new HashMap<>(numFields); + Map fields = new HashMap<>(numFields); + Map expectedFields = new HashMap<>(numFields); for (int i = 0; i < numFields; i++) { - Tuple tuple = randomGetField(xContentType); - GetField getField = tuple.v1(); - GetField expectedGetField = tuple.v2(); + Tuple tuple = randomDocumentField(xContentType); + DocumentField getField = tuple.v1(); + DocumentField expectedGetField = tuple.v2(); fields.put(getField.getName(), getField); expectedFields.put(expectedGetField.getName(), expectedGetField); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index d3d099672ba..366b5db92d2 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -151,11 +151,10 @@ public class DocumentParserTests extends ESSingleNodeTestCase { public void testNestedHaveIdAndTypeFields() throws Exception { DocumentMapperParser mapperParser1 = createIndex("index1", Settings.builder() - .put("index.mapping.single_type", false).build() - ).mapperService().documentMapperParser(); - DocumentMapperParser mapperParser2 = createIndex("index2", Settings.builder() - .put("index.mapping.single_type", true).build() + .put("index.version.created", Version.V_5_6_0) // allows for multiple types + .build() ).mapperService().documentMapperParser(); + DocumentMapperParser mapperParser2 = createIndex("index2").mapperService().documentMapperParser(); XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties"); { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java index 2acd6b5c987..3f2dc286108 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java @@ -70,25 +70,25 @@ public class DoubleIndexingDocTests extends ESSingleNodeTestCase { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field1").fieldType().termQuery("value1", context), 10); - assertThat(topDocs.totalHits, equalTo(2)); + assertThat(topDocs.totalHits, equalTo(2L)); topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field2").fieldType().termQuery("1", context), 10); - assertThat(topDocs.totalHits, equalTo(2)); + assertThat(topDocs.totalHits, equalTo(2L)); topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field3").fieldType().termQuery("1.1", context), 10); - 
assertThat(topDocs.totalHits, equalTo(2)); + assertThat(topDocs.totalHits, equalTo(2L)); topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field4").fieldType().termQuery("2010-01-01", context), 10); - assertThat(topDocs.totalHits, equalTo(2)); + assertThat(topDocs.totalHits, equalTo(2L)); topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("1", context), 10); - assertThat(topDocs.totalHits, equalTo(2)); + assertThat(topDocs.totalHits, equalTo(2L)); topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("2", context), 10); - assertThat(topDocs.totalHits, equalTo(2)); + assertThat(topDocs.totalHits, equalTo(2L)); topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").fieldType().termQuery("3", context), 10); - assertThat(topDocs.totalHits, equalTo(2)); + assertThat(topDocs.totalHits, equalTo(2L)); writer.close(); reader.close(); dir.close(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 91a498541ed..d183242ee19 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -18,26 +18,39 @@ */ package org.elasticsearch.index.mapper; +import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.TypeMissingException; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; public class DynamicMappingIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + public void testConflictingDynamicMappings() { // we don't use indexRandom because the order of requests is important here createIndex("index"); @@ -71,7 +84,21 @@ public class DynamicMappingIT extends ESIntegTestCase { } public void testMappingsPropagatedToMasterNodeImmediately() throws IOException { - assertAcked(prepareCreate("index").setSettings("index.mapping.single_type", false)); + assertAcked(prepareCreate("index")); + + // works when the type has been dynamically created + client().prepareIndex("index", "type", "1").setSource("foo", 3).get(); + GetMappingsResponse mappings = client().admin().indices().prepareGetMappings("index").setTypes("type").get(); + assertMappingsHaveField(mappings, "index", "type", "foo"); + + // works if the type already existed + client().prepareIndex("index", "type", "1").setSource("bar", 
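Most of the one-character edits in these hunks (equalTo(2) becoming equalTo(2L), plus the explicit (long) casts) exist because the value under test is now a long: a matcher built from an int literal neither type-checks against nor equals a boxed Long. A small sketch of why the suffix matters, assuming Hamcrest on the classpath:

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;

public class LongLiteralMatcherSketch {
    public static void main(String[] args) {
        long totalHits = 2L;                // e.g. a totalHits value that is now a long
        assertThat(totalHits, equalTo(2L)); // Long vs Long: matches
        // equalTo(2) would build a Matcher<Integer>; a boxed Long(2) is never equal to a
        // boxed Integer(2), so the assertion could not succeed without the L suffix or a cast.
        assertThat(Long.valueOf(2).equals(Integer.valueOf(2)), equalTo(false));
    }
}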
"baz").get(); + mappings = client().admin().indices().prepareGetMappings("index").setTypes("type").get(); + assertMappingsHaveField(mappings, "index", "type", "bar"); + } + + public void testMappingsPropagatedToMasterNodeImmediatelyMultiType() throws IOException { + assertAcked(prepareCreate("index").setSettings("index.version.created", Version.V_5_6_0.id)); // allows for multiple types // works when the type has been dynamically created client().prepareIndex("index", "type", "1").setSource("foo", 3).get(); @@ -144,6 +171,13 @@ public class DynamicMappingIT extends ESIntegTestCase { assertEquals("type[bar] missing", e1.getMessage()); assertEquals("trying to auto create mapping, but dynamic mapping is disabled", e1.getCause().getMessage()); + BulkResponse bulkResponse = client().prepareBulk().add(new IndexRequest("index_2", "bar", "2").source("field", "abc")).get(); + assertTrue(bulkResponse.hasFailures()); + BulkItemResponse.Failure firstFailure = bulkResponse.getItems()[0].getFailure(); + assertThat(firstFailure.getCause(), instanceOf(TypeMissingException.class)); + assertEquals("type[bar] missing", firstFailure.getCause().getMessage()); + assertEquals("trying to auto create mapping, but dynamic mapping is disabled", firstFailure.getCause().getCause().getMessage()); + // make sure no mappings were created for bar GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("index_2").get(); assertFalse(getIndexResponse.mappings().containsKey("bar")); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index daf3d99ad5b..06c31f4dd18 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -36,9 +36,13 @@ import org.elasticsearch.index.mapper.BooleanFieldMapper.BooleanFieldType; import org.elasticsearch.index.mapper.DateFieldMapper.DateFieldType; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; +import java.util.Collection; +import java.util.Collections; import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -49,6 +53,11 @@ import static org.hamcrest.Matchers.nullValue; public class DynamicMappingTests extends ESSingleNodeTestCase { + @Override + protected Collection> getPlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + public void testDynamicTrue() throws IOException { String mapping = jsonBuilder().startObject().startObject("type") .field("dynamic", "true") @@ -183,9 +192,7 @@ public class DynamicMappingTests extends ESSingleNodeTestCase { XContentBuilder mapping = jsonBuilder().startObject().startObject("_default_") .field("dynamic", "strict") .endObject().endObject(); - - IndexService indexService = createIndex("test", Settings.EMPTY, "_default_", mapping); - + createIndex("test", Settings.EMPTY, "_default_", mapping); try { client().prepareIndex().setIndex("test").setType("type").setSource(jsonBuilder().startObject().field("test", "test").endObject()).get(); fail(); @@ -525,9 +532,9 @@ public class DynamicMappingTests extends ESSingleNodeTestCase { } public void 
testMixTemplateMultiFieldAndMappingReuse() throws Exception { - IndexService indexService = createIndex("test", Settings.builder().put("mapping.single_type", false).build()); + IndexService indexService = createIndex("test"); XContentBuilder mappings1 = jsonBuilder().startObject() - .startObject("type1") + .startObject("doc") .startArray("dynamic_templates") .startObject() .startObject("template1") @@ -544,20 +551,60 @@ public class DynamicMappingTests extends ESSingleNodeTestCase { .endObject() .endArray() .endObject().endObject(); - indexService.mapperService().merge("type1", new CompressedXContent(mappings1.bytes()), MapperService.MergeReason.MAPPING_UPDATE, false); - XContentBuilder mappings2 = jsonBuilder().startObject() - .startObject("type2") - .startObject("properties") - .startObject("field") - .field("type", "text") - .endObject() - .endObject() - .endObject().endObject(); - indexService.mapperService().merge("type2", new CompressedXContent(mappings2.bytes()), MapperService.MergeReason.MAPPING_UPDATE, false); + indexService.mapperService().merge("doc", new CompressedXContent(mappings1.bytes()), + MapperService.MergeReason.MAPPING_UPDATE, false); XContentBuilder json = XContentFactory.jsonBuilder().startObject() .field("field", "foo") .endObject(); + SourceToParse source = SourceToParse.source("test", "doc", "1", json.bytes(), json.contentType()); + DocumentMapper mapper = indexService.mapperService().documentMapper("doc"); + assertNull(mapper.mappers().getMapper("field.raw")); + ParsedDocument parsed = mapper.parse(source); + assertNotNull(parsed.dynamicMappingsUpdate()); + + indexService.mapperService().merge("doc", new CompressedXContent(parsed.dynamicMappingsUpdate().toString()), + MapperService.MergeReason.MAPPING_UPDATE, false); + mapper = indexService.mapperService().documentMapper("doc"); + assertNotNull(mapper.mappers().getMapper("field.raw")); + parsed = mapper.parse(source); + assertNull(parsed.dynamicMappingsUpdate()); + } + + public void testMixTemplateMultiFieldMultiTypeAndMappingReuse() throws Exception { + IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()); + XContentBuilder mappings1 = jsonBuilder().startObject() + .startObject("type1") + .startArray("dynamic_templates") + .startObject() + .startObject("template1") + .field("match_mapping_type", "string") + .startObject("mapping") + .field("type", "text") + .startObject("fields") + .startObject("raw") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endArray() + .endObject().endObject(); + indexService.mapperService().merge("type1", new CompressedXContent(mappings1.bytes()), MapperService.MergeReason.MAPPING_UPDATE, false); + XContentBuilder mappings2 = jsonBuilder().startObject() + .startObject("type2") + .startObject("properties") + .startObject("field") + .field("type", "text") + .endObject() + .endObject() + .endObject().endObject(); + indexService.mapperService().merge("type2", new CompressedXContent(mappings2.bytes()), MapperService.MergeReason.MAPPING_UPDATE, false); + + XContentBuilder json = XContentFactory.jsonBuilder().startObject() + .field("field", "foo") + .endObject(); SourceToParse source = SourceToParse.source("test", "type1", "1", json.bytes(), json.contentType()); DocumentMapper mapper = indexService.mapperService().documentMapper("type1"); assertNull(mapper.mappers().getMapper("field.raw")); diff --git 
a/core/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java index 185f1c51d2e..cbef022af75 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java @@ -22,20 +22,29 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.MapperService.MergeReason; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; public class IdFieldMapperTests extends ESSingleNodeTestCase { + @Override + protected Collection> getPlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + public void testIncludeInObjectNotAllowed() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); @@ -51,7 +60,7 @@ public class IdFieldMapperTests extends ESSingleNodeTestCase { public void testDefaultsMultipleTypes() throws IOException { Settings indexSettings = Settings.builder() - .put("index.mapping.single_type", false) + .put("index.version.created", Version.V_5_6_0) .build(); MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); @@ -60,9 +69,7 @@ public class IdFieldMapperTests extends ESSingleNodeTestCase { } public void testDefaultsSingleType() throws IOException { - Settings indexSettings = Settings.builder() - .put("index.mapping.single_type", true) - .build(); + Settings indexSettings = Settings.EMPTY; MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java index 2209027c12f..95b8b0daa48 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java @@ -49,11 +49,10 @@ public class IdFieldTypeTests extends FieldTypeTestCase { public void testTermsQueryWhenTypesAreEnabled() throws Exception { QueryShardContext context = Mockito.mock(QueryShardContext.class); Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0) // allows for 
multiple types .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put("index.mapping.single_type", false).build(); + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); @@ -80,8 +79,7 @@ public class IdFieldTypeTests extends FieldTypeTestCase { .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put("index.mapping.single_type", true).build(); + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index cb0b922e197..26049bd9103 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; @@ -28,12 +29,15 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.KeywordFieldMapper.KeywordFieldType; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.hamcrest.Matchers; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -48,6 +52,11 @@ import static org.hamcrest.Matchers.startsWith; public class MapperServiceTests extends ESSingleNodeTestCase { + @Override + protected Collection> getPlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + public void testTypeNameStartsWithIllegalDot() { String index = "test-index"; String type = ".test-type"; @@ -74,7 +83,8 @@ public class MapperServiceTests extends ESSingleNodeTestCase { } public void testTypes() throws Exception { - IndexService indexService1 = createIndex("index1", Settings.builder().put("index.mapping.single_type", false).build()); + IndexService indexService1 = createIndex("index1", Settings.builder().put("index.version.created", Version.V_5_6_0) // multi types + .build()); MapperService mapperService = indexService1.mapperService(); assertEquals(Collections.emptySet(), mapperService.types()); @@ -207,7 +217,8 @@ public class MapperServiceTests extends ESSingleNodeTestCase { } 
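Many of the hunks above follow the same recipe for keeping multi-type mapping tests alive now that the index.mapping.single_type setting is gone: register InternalSettingsPlugin and create the index as if it came from 5.6. A condensed, hypothetical test showing just that skeleton (class, index, and type names are illustrative, not from the patch):

import java.util.Collection;
import java.util.Collections;

import org.elasticsearch.Version;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;

public class MultiTypeMappingSketchTests extends ESSingleNodeTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> getPlugins() {
        // required so the test is allowed to set index.version.created explicitly
        return Collections.singleton(InternalSettingsPlugin.class);
    }

    public void testTwoTypesOnAPre60Index() throws Exception {
        // an index "created" on 5.6 still accepts more than one mapping type
        IndexService indexService = createIndex("old-index",
            Settings.builder().put("index.version.created", Version.V_5_6_0).build());
        MapperService mapperService = indexService.mapperService();
        mapperService.merge("type1", new CompressedXContent("{\"type1\":{}}"),
            MapperService.MergeReason.MAPPING_UPDATE, false);
        mapperService.merge("type2", new CompressedXContent("{\"type2\":{}}"),
            MapperService.MergeReason.MAPPING_UPDATE, false);
        assertEquals(2, mapperService.types().size());
    }
}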
public void testOtherDocumentMappersOnlyUpdatedWhenChangingFieldType() throws IOException { - IndexService indexService = createIndex("test", Settings.builder().put("index.mapping.single_type", false).build()); + IndexService indexService = createIndex("test", + Settings.builder().put("index.version.created", Version.V_5_6_0).build()); // multiple types CompressedXContent simpleMapping = new CompressedXContent(XContentFactory.jsonBuilder().startObject() .startObject("properties") diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index f4a8ce11c56..157033d4148 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -19,16 +19,21 @@ package org.elasticsearch.index.mapper; +import org.elasticsearch.Version; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.mapper.ObjectMapper.Dynamic; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.Collections; import java.util.function.Function; import static org.hamcrest.Matchers.containsString; @@ -36,6 +41,12 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; public class NestedObjectMapperTests extends ESSingleNodeTestCase { + + @Override + protected Collection> getPlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + public void testEmptyNested() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") .startObject("nested1").field("type", "nested").endObject() @@ -382,16 +393,34 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase { .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false)); assertThat(e.getMessage(), containsString("Limit of nested fields [1] in index [test3] has been exceeded")); + // do not check nested fields limit if mapping is not updated + createIndex("test4", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build()) + .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY, false); + } + + public void testLimitOfNestedFieldsWithMultiTypePerIndex() throws Exception { + Function mapping = type -> { + try { + return XContentFactory.jsonBuilder().startObject().startObject(type).startObject("properties") + .startObject("nested1").field("type", "nested").startObject("properties") + .startObject("nested2").field("type", "nested") + .endObject().endObject().endObject() + .endObject().endObject().endObject().string(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }; + MapperService mapperService = createIndex("test4", Settings.builder() - .put("mapping.single_type", false) - .put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 2).build()).mapperService(); + 
.put("index.version.created", Version.V_5_6_0) + .put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 2).build()).mapperService(); mapperService.merge("type1", new CompressedXContent(mapping.apply("type1")), MergeReason.MAPPING_UPDATE, false); // merging same fields, but different type is ok mapperService.merge("type2", new CompressedXContent(mapping.apply("type2")), MergeReason.MAPPING_UPDATE, false); // adding new fields from different type is not ok String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type3").startObject("properties").startObject("nested3") .field("type", "nested").startObject("properties").endObject().endObject().endObject().endObject().endObject().string(); - e = expectThrows(IllegalArgumentException.class, () -> + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> mapperService.merge("type3", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false)); assertThat(e.getMessage(), containsString("Limit of nested fields [2] in index [test4] has been exceeded")); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java index 7ef1b751eeb..935562a2bc4 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -35,9 +36,12 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.InternalSettingsPlugin; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -47,6 +51,11 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; public class ParentFieldMapperTests extends ESSingleNodeTestCase { + @Override + protected Collection> getPlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + public void testParentSetInDocNotAllowed() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .endObject().endObject().string(); @@ -67,7 +76,7 @@ public class ParentFieldMapperTests extends ESSingleNodeTestCase { String childMapping = XContentFactory.jsonBuilder().startObject().startObject("child_type") .startObject("_parent").field("type", "parent_type").endObject() .endObject().endObject().string(); - IndexService indexService = createIndex("test", Settings.builder().put("mapping.single_type", false).build()); + IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()); indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE, false); indexService.mapperService().merge("child_type", new 
CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE, false); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 367f79e5980..854164063e3 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -25,15 +25,17 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.PostingsEnum; -import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType; @@ -69,7 +71,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { return pluginList(InternalSettingsPlugin.class); } - public void testDefaults() throws Exception { + public void testDefaults() throws IOException { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", "text").endObject().endObject() .endObject().endObject().string(); @@ -185,7 +187,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { for (String option : supportedOptions.keySet()) { jsonDoc.field(option, "1234"); } - ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", jsonDoc.endObject().bytes(), + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", jsonDoc.endObject().bytes(), XContentType.JSON)); for (Map.Entry entry : supportedOptions.entrySet()) { @@ -207,12 +209,13 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() + SourceToParse sourceToParse = SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .array("field", new String[] {"a", "b"}) .endObject() .bytes(), - XContentType.JSON)); + XContentType.JSON); + ParsedDocument doc = mapper.parse(sourceToParse); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -221,7 +224,8 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { assertEquals("b", fields[1].stringValue()); IndexShard shard = indexService.getShard(0); - shard.index(new Engine.Index(new Term("_id", doc.id()), doc)); + shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, + sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); shard.refresh("test"); try (Engine.Searcher searcher = shard.acquireSearcher("test")) { LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader(); @@ -247,12 +251,13 @@ public class 
TextFieldMapperTests extends ESSingleNodeTestCase { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() + SourceToParse sourceToParse = SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() - .array("field", new String[] {"a", "b"}) + .array("field", new String[]{"a", "b"}) .endObject() .bytes(), - XContentType.JSON)); + XContentType.JSON); + ParsedDocument doc = mapper.parse(sourceToParse); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -261,7 +266,8 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { assertEquals("b", fields[1].stringValue()); IndexShard shard = indexService.getShard(0); - shard.index(new Engine.Index(new Term("_id", doc.id()), doc)); + shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, + sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); shard.refresh("test"); try (Engine.Searcher searcher = shard.acquireSearcher("test")) { LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader(); @@ -372,7 +378,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase { assertEquals(mapping, mapper.mappingSource().toString()); } - public void testTermVectors() throws Exception { + public void testTermVectors() throws IOException { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties") .startObject("field1") diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java index d3091ac3459..cf7b4a233a1 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -61,8 +62,8 @@ public class TypeFieldMapperTests extends ESSingleNodeTestCase { } public void testDocValues(boolean singleType) throws IOException { - Settings indexSettings = Settings.builder() - .put("index.mapping.single_type", singleType) + Settings indexSettings = singleType ? 
Settings.EMPTY : Settings.builder() + .put("index.version.created", Version.V_5_6_0) .build(); MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); @@ -89,7 +90,7 @@ public class TypeFieldMapperTests extends ESSingleNodeTestCase { public void testDefaultsMultipleTypes() throws IOException { Settings indexSettings = Settings.builder() - .put("index.mapping.single_type", false) + .put("index.version.created", Version.V_5_6_0) .build(); MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); @@ -100,9 +101,7 @@ public class TypeFieldMapperTests extends ESSingleNodeTestCase { } public void testDefaultsSingleType() throws IOException { - Settings indexSettings = Settings.builder() - .put("index.mapping.single_type", true) - .build(); + Settings indexSettings = Settings.EMPTY; MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java index b8a2805efe9..8f64e051929 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java @@ -62,8 +62,7 @@ public class TypeFieldTypeTests extends FieldTypeTestCase { .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put("index.mapping.single_type", true).build(); + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); @@ -100,11 +99,11 @@ public class TypeFieldTypeTests extends FieldTypeTestCase { QueryShardContext context = Mockito.mock(QueryShardContext.class); Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0) // to allow for multiple types .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put("index.mapping.single_type", false).build(); + .build(); IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java index e5503738f06..c5816de2e19 100644 --- 
a/core/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java @@ -21,22 +21,31 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.MapperService.MergeReason; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; public class UidFieldMapperTests extends ESSingleNodeTestCase { + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + public void testDefaultsMultipleTypes() throws IOException { Settings indexSettings = Settings.builder() - .put("index.mapping.single_type", false) + .put("index.version.created", Version.V_5_6_0) .build(); MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); @@ -49,9 +58,7 @@ public class UidFieldMapperTests extends ESSingleNodeTestCase { } public void testDefaultsSingleType() throws IOException { - Settings indexSettings = Settings.builder() - .put("index.mapping.single_type", true) - .build(); + Settings indexSettings = Settings.EMPTY; MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java index 1a9a78f51cf..14de6e0d255 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java @@ -52,11 +52,11 @@ public class UidFieldTypeTests extends FieldTypeTestCase { public void testTermsQueryWhenTypesAreEnabled() throws Exception { QueryShardContext context = Mockito.mock(QueryShardContext.class); Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0) // to allow for multiple types .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put("index.mapping.single_type", false).build(); + .build(); IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); @@ -78,8 +78,7 @@ public class UidFieldTypeTests extends FieldTypeTestCase { .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put("index.mapping.single_type", true).build(); + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java index 7c18d9cb1a6..c6a1eae036a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper; +import org.elasticsearch.Version; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -150,7 +151,8 @@ public class UpdateMappingTests extends ESSingleNodeTestCase { .startObject("properties").startObject("foo").field("type", "long").endObject() .endObject().endObject().endObject(); XContentBuilder mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type2").endObject().endObject(); - MapperService mapperService = createIndex("test", Settings.builder().put("mapping.single_type", false).build()).mapperService(); + MapperService mapperService = createIndex("test", Settings.builder().put("index.version.created", + Version.V_5_6_0).build()).mapperService(); mapperService.merge("type1", new CompressedXContent(mapping1.string()), MapperService.MergeReason.MAPPING_UPDATE, false); mapperService.merge("type2", new CompressedXContent(mapping2.string()), MapperService.MergeReason.MAPPING_UPDATE, false); diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index ca3850c4118..038eadeff7d 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -62,7 +62,6 @@ import org.joda.time.DateTimeZone; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -401,12 +400,18 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase { + private long clusterStateVersion; private IndexShard primary; private IndexMetaData indexMetaData; private final List replicas; private final AtomicInteger replicaId = new AtomicInteger(); private final AtomicInteger docId = new AtomicInteger(); boolean closed = false; + private final PrimaryReplicaSyncer primaryReplicaSyncer = new PrimaryReplicaSyncer(Settings.EMPTY, new TaskManager(Settings.EMPTY), + (request, parentTask, primaryAllocationId, listener) -> { + try { + new ResyncAction(request, listener, ReplicationGroup.this).execute(); + } catch (Exception e) { + throw new AssertionError(e); + } + }); ReplicationGroup(final IndexMetaData indexMetaData) throws IOException { final ShardRouting primaryRouting = this.createShardRouting("s0", true); primary = newShard(primaryRouting, indexMetaData, null, getEngineFactory(primaryRouting)); 
replicas = new ArrayList<>(); this.indexMetaData = indexMetaData; + clusterStateVersion = 1; updateAllocationIDsOnPrimary(); for (int i = 0; i < indexMetaData.getNumberOfReplicas(); i++) { addReplica(); @@ -209,8 +224,14 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase final DiscoveryNode pNode = getDiscoveryNode(primary.routingEntry().currentNodeId()); primary.markAsRecovering("store", new RecoveryState(primary.routingEntry(), pNode, null)); primary.recoverFromStore(); - primary.updateRoutingEntry(ShardRoutingHelper.moveToStarted(primary.routingEntry())); - updateAllocationIDsOnPrimary(); + HashSet activeIds = new HashSet<>(); + activeIds.addAll(activeIds()); + activeIds.add(primary.routingEntry().allocationId().getId()); + HashSet initializingIds = new HashSet<>(); + initializingIds.addAll(initializingIds()); + initializingIds.remove(primary.routingEntry().allocationId().getId()); + primary.updateShardState(ShardRoutingHelper.moveToStarted(primary.routingEntry()), primary.getPrimaryTerm(), null, + ++clusterStateVersion, activeIds, initializingIds); for (final IndexShard replica : replicas) { recoverReplica(replica); } @@ -224,11 +245,12 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase return replica; } - public synchronized void addReplica(IndexShard replica) { + public synchronized void addReplica(IndexShard replica) throws IOException { assert shardRoutings().stream() .filter(shardRouting -> shardRouting.isSameAllocation(replica.routingEntry())).findFirst().isPresent() == false : "replica with aId [" + replica.routingEntry().allocationId() + "] already exists"; replicas.add(replica); + clusterStateVersion++; updateAllocationIDsOnPrimary(); } @@ -243,6 +265,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase final IndexShard newReplica = newShard(shardRouting, shardPath, indexMetaData, null, getEngineFactory(shardRouting)); replicas.add(newReplica); + clusterStateVersion++; updateAllocationIDsOnPrimary(); return newReplica; } @@ -254,21 +277,53 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase /** * promotes the specific replica as the new primary */ - public synchronized void promoteReplicaToPrimary(IndexShard replica) throws IOException { + public synchronized Future promoteReplicaToPrimary(IndexShard replica) throws IOException { final long newTerm = indexMetaData.primaryTerm(shardId.id()) + 1; IndexMetaData.Builder newMetaData = IndexMetaData.builder(indexMetaData).primaryTerm(shardId.id(), newTerm); indexMetaData = newMetaData.build(); assertTrue(replicas.remove(replica)); closeShards(primary); primary = replica; - primary.updateRoutingEntry(replica.routingEntry().moveActiveReplicaToPrimary()); - primary.updatePrimaryTerm(newTerm); - updateAllocationIDsOnPrimary(); + PlainActionFuture fut = new PlainActionFuture<>(); + HashSet activeIds = new HashSet<>(); + activeIds.addAll(activeIds()); + activeIds.add(replica.routingEntry().allocationId().getId()); + HashSet initializingIds = new HashSet<>(); + initializingIds.addAll(initializingIds()); + initializingIds.remove(replica.routingEntry().allocationId().getId()); + primary.updateShardState(replica.routingEntry().moveActiveReplicaToPrimary(), + newTerm, (shard, listener) -> primaryReplicaSyncer.resync(shard, + new ActionListener() { + @Override + public void onResponse(PrimaryReplicaSyncer.ResyncTask resyncTask) { + listener.onResponse(resyncTask); + fut.onResponse(resyncTask); + } + + @Override + 
public void onFailure(Exception e) { + listener.onFailure(e); + fut.onFailure(e); + } + }), ++clusterStateVersion, activeIds, initializingIds); + + return fut; } - synchronized boolean removeReplica(IndexShard replica) { + private synchronized Set activeIds() { + return shardRoutings().stream() + .filter(ShardRouting::active).map(ShardRouting::allocationId).map(AllocationId::getId).collect(Collectors.toSet()); + } + + private synchronized Set initializingIds() { + return shardRoutings().stream() + .filter(ShardRouting::initializing).map(ShardRouting::allocationId).map(AllocationId::getId).collect(Collectors.toSet()); + } + + synchronized boolean removeReplica(IndexShard replica) throws IOException { final boolean removed = replicas.remove(replica); if (removed) { + clusterStateVersion++; updateAllocationIDsOnPrimary(); } return removed; @@ -288,6 +343,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase BiFunction targetSupplier, boolean markAsRecovering) throws IOException { ESIndexLevelReplicationTestCase.this.recoverReplica(replica, primary, targetSupplier, markAsRecovering); + clusterStateVersion++; updateAllocationIDsOnPrimary(); } @@ -365,17 +421,9 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } } - private void updateAllocationIDsOnPrimary() { - Set active = new HashSet<>(); - Set initializing = new HashSet<>(); - for (ShardRouting shard: shardRoutings()) { - if (shard.active()) { - active.add(shard.allocationId().getId()); - } else { - initializing.add(shard.allocationId().getId()); - } - } - primary.updateAllocationIdsFromMaster(active, initializing); + private void updateAllocationIDsOnPrimary() throws IOException { + primary.updateShardState(primary.routingEntry(), primary.getPrimaryTerm(), null, clusterStateVersion, + activeIds(), initializingIds()); } } @@ -394,36 +442,40 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase this.opType = opType; } - public void execute() throws Exception { - new ReplicationOperation(request, new PrimaryRef(), - new ActionListener() { + public void execute() { + try { + new ReplicationOperation(request, new PrimaryRef(), + new ActionListener() { + @Override + public void onResponse(PrimaryResult result) { + result.respond(listener); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, new ReplicasRef(), () -> null, logger, opType) { + @Override - public void onResponse(PrimaryResult result) { - result.respond(listener); + protected List getShards(ShardId shardId, ClusterState state) { + return replicationGroup.shardRoutings(); } @Override - public void onFailure(Exception e) { - listener.onFailure(e); + protected String checkActiveShardCount() { + return null; } - }, new ReplicasRef(), () -> null, logger, opType) { - @Override - protected List getShards(ShardId shardId, ClusterState state) { - return replicationGroup.shardRoutings(); - } - - @Override - protected String checkActiveShardCount() { - return null; - } - - @Override - protected Set getInSyncAllocationIds(ShardId shardId, ClusterState clusterState) { - return replicationGroup.shardRoutings().stream().filter(ShardRouting::active).map(r -> r.allocationId().getId()) - .collect(Collectors.toSet()); - } - }.execute(); + @Override + protected Set getInSyncAllocationIds(ShardId shardId, ClusterState clusterState) { + return replicationGroup.shardRoutings().stream().filter(ShardRouting::active).map(r -> r.allocationId().getId()) + 
.collect(Collectors.toSet()); + } + }.execute(); + } catch (Exception e) { + listener.onFailure(e); + } } protected abstract PrimaryResult performOnPrimary(IndexShard primary, Request request) throws Exception; @@ -478,11 +530,11 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase .filter(s -> replicaRouting.isSameAllocation(s.routingEntry())).findFirst().get(); replica.acquireReplicaOperationPermit( request.primaryTerm(), + globalCheckpoint, new ActionListener() { @Override public void onResponse(Releasable releasable) { try { - replica.updateGlobalCheckpointOnReplica(globalCheckpoint); performOnReplica(request, replica); releasable.close(); listener.onResponse( @@ -625,4 +677,37 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } } + class ResyncAction extends ReplicationAction { + + ResyncAction(ResyncReplicationRequest request, ActionListener listener, ReplicationGroup replicationGroup) { + super(request, listener, replicationGroup, "resync"); + } + + @Override + protected PrimaryResult performOnPrimary(IndexShard primary, ResyncReplicationRequest request) throws Exception { + final TransportWriteAction.WritePrimaryResult result = + executeResyncOnPrimary(primary, request); + return new PrimaryResult(result.replicaRequest(), result.finalResponseIfSuccessful); + } + + @Override + protected void performOnReplica(ResyncReplicationRequest request, IndexShard replica) throws Exception { + executeResyncOnReplica(replica, request); + } + } + + private TransportWriteAction.WritePrimaryResult executeResyncOnPrimary( + IndexShard primary, ResyncReplicationRequest request) throws Exception { + final TransportWriteAction.WritePrimaryResult result = + new TransportWriteAction.WritePrimaryResult<>(TransportResyncReplicationAction.performOnPrimary(request, primary), + new ResyncReplicationResponse(), null, null, primary, logger); + request.primaryTerm(primary.getPrimaryTerm()); + TransportWriteActionTestHelper.performPostWriteActions(primary, request, result.location, logger); + return result; + } + + private void executeResyncOnReplica(IndexShard replica, ResyncReplicationRequest request) throws Exception { + final Translog.Location location = TransportResyncReplicationAction.performOnReplica(request, replica); + TransportWriteActionTestHelper.performPostWriteActions(replica, request, location, logger); + } } diff --git a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 9b2200d8be3..33a1cfed0b6 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -18,17 +18,15 @@ */ package org.elasticsearch.index.replication; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexableField; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentHelper; import 
org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; @@ -44,7 +42,6 @@ import org.elasticsearch.index.shard.IndexShardTests; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.recovery.RecoveryTarget; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.hamcrest.Matcher; import java.io.IOException; @@ -272,9 +269,8 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase assertThat(response.getFailure().getCause(), instanceOf(VersionConflictEngineException.class)); shards.assertAllEqual(0); for (IndexShard indexShard : shards) { - try(Translog.View view = indexShard.acquireTranslogView()) { - assertThat(view.totalOperations(), equalTo(0)); - } + assertThat(indexShard.routingEntry() + " has the wrong number of ops in the translog", + indexShard.translogStats().estimatedNumberOfOperations(), equalTo(0)); } // add some replicas @@ -292,9 +288,8 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase assertThat(response.getFailure().getCause(), instanceOf(VersionConflictEngineException.class)); shards.assertAllEqual(0); for (IndexShard indexShard : shards) { - try(Translog.View view = indexShard.acquireTranslogView()) { - assertThat(view.totalOperations(), equalTo(0)); - } + assertThat(indexShard.routingEntry() + " has the wrong number of ops in the translog", + indexShard.translogStats().estimatedNumberOfOperations(), equalTo(0)); } } } @@ -327,8 +322,8 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase String failureMessage) throws IOException { for (IndexShard indexShard : replicationGroup) { try(Translog.View view = indexShard.acquireTranslogView()) { - assertThat(view.totalOperations(), equalTo(expectedOperation)); - final Translog.Snapshot snapshot = view.snapshot(); + assertThat(view.estimateTotalOperations(SequenceNumbersService.NO_OPS_PERFORMED), equalTo(expectedOperation)); + final Translog.Snapshot snapshot = view.snapshot(SequenceNumbersService.NO_OPS_PERFORMED); long expectedSeqNo = 0L; Translog.Operation op = snapshot.next(); do { diff --git a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 1c7705d534a..9e030f68a3b 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -29,13 +29,13 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.InternalEngineTests; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.PrimaryReplicaSyncer; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; @@ -55,6 +55,7 @@ import 
java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; @@ -113,18 +114,25 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC docs += missingOnReplica; replicaHasDocsSinceLastFlushedCheckpoint |= missingOnReplica > 0; - final boolean flushPrimary = randomBoolean(); - if (flushPrimary) { + final boolean translogTrimmed; + if (randomBoolean()) { shards.flush(); + translogTrimmed = randomBoolean(); + if (translogTrimmed) { + final Translog translog = shards.getPrimary().getTranslog(); + translog.getDeletionPolicy().setRetentionAgeInMillis(0); + translog.trimUnreferencedReaders(); + } + } else { + translogTrimmed = false; } - originalReplica.close("disconnected", false); IOUtils.close(originalReplica.store()); final IndexShard recoveredReplica = shards.addReplicaWithExistingPath(originalReplica.shardPath(), originalReplica.routingEntry().currentNodeId()); shards.recoverReplica(recoveredReplica); - if (flushPrimary && replicaHasDocsSinceLastFlushedCheckpoint) { - // replica has something to catch up with, but since we flushed the primary, we should fall back to full recovery + if (translogTrimmed && replicaHasDocsSinceLastFlushedCheckpoint) { + // replica has something to catch up with, but since we trimmed the primary translog, we should fall back to full recovery assertThat(recoveredReplica.recoveryState().getIndex().fileDetails(), not(empty())); } else { assertThat(recoveredReplica.recoveryState().getIndex().fileDetails(), empty()); @@ -177,6 +185,10 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC // index some more totalDocs += shards.indexDocs(randomIntBetween(0, 5)); + if (randomBoolean()) { + newPrimary.flush(new FlushRequest()); + } + oldPrimary.close("demoted", false); oldPrimary.store().close(); @@ -188,9 +200,10 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(totalDocs - committedDocs)); } else { assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); - assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(totalDocs - committedDocs)); + assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(totalDocs)); } + // roll back the extra ops in the replica shards.removeReplica(replica); replica.close("resync", false); replica.store().close(); @@ -201,6 +214,41 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC } } + @TestLogging("org.elasticsearch.index.shard:TRACE,org.elasticsearch.action.resync:TRACE") + public void testResyncAfterPrimaryPromotion() throws Exception { + // TODO: check translog trimming functionality once it's implemented + try (ReplicationGroup shards = createGroup(2)) { + shards.startAll(); + int initialDocs = shards.indexDocs(randomInt(10)); + boolean syncedGlobalCheckPoint = randomBoolean(); + if (syncedGlobalCheckPoint) { + shards.syncGlobalCheckpoint(); + } + + final IndexShard oldPrimary = shards.getPrimary(); + final IndexShard newPrimary = shards.getReplicas().get(0); + final IndexShard otherReplica = shards.getReplicas().get(1); + + // simulate docs that were inflight when primary failed + final int 
extraDocs = randomIntBetween(0, 5); + logger.info("--> indexing {} extra docs", extraDocs); + for (int i = 0; i < extraDocs; i++) { + final IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "extra_" + i) + .source("{}", XContentType.JSON); + final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary); + indexOnReplica(bulkShardRequest, newPrimary); + } + logger.info("--> resyncing replicas"); + PrimaryReplicaSyncer.ResyncTask task = shards.promoteReplicaToPrimary(newPrimary).get(); + if (syncedGlobalCheckPoint) { + assertEquals(extraDocs, task.getResyncedOperations()); + } else { + assertThat(task.getResyncedOperations(), greaterThanOrEqualTo(extraDocs)); + } + shards.assertAllEqual(initialDocs + extraDocs); + } + } + @TestLogging( "_root:DEBUG," + "org.elasticsearch.action.bulk:TRACE," @@ -407,7 +455,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC } } - private static class BlockingTarget extends RecoveryTarget { + public static class BlockingTarget extends RecoveryTarget { private final CountDownLatch recoveryBlocked; private final CountDownLatch releaseRecovery; @@ -416,8 +464,9 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC EnumSet.of(RecoveryState.Stage.INDEX, RecoveryState.Stage.TRANSLOG, RecoveryState.Stage.FINALIZE); private final Logger logger; - BlockingTarget(RecoveryState.Stage stageToBlock, CountDownLatch recoveryBlocked, CountDownLatch releaseRecovery, IndexShard shard, - DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener, Logger logger) { + public BlockingTarget(RecoveryState.Stage stageToBlock, CountDownLatch recoveryBlocked, CountDownLatch releaseRecovery, + IndexShard shard, DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener, + Logger logger) { super(shard, sourceNode, listener, version -> {}); this.recoveryBlocked = recoveryBlocked; this.releaseRecovery = releaseRecovery; diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java b/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java index 4d73ce48a2e..d4dc71388ac 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java @@ -226,7 +226,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopFieldDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7)); + assertThat(topDocs.totalHits, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(11)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(7)); @@ -241,7 +241,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7)); + assertThat(topDocs.totalHits, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(28)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(13)); @@ -263,7 +263,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends 
AbstractFieldD ); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(6)); + assertThat(topDocs.totalHits, equalTo(6L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(23)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(12)); @@ -278,7 +278,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD sort = new Sort(new SortField("field2", nestedComparatorSource)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(6)); + assertThat(topDocs.totalHits, equalTo(6L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(15)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(3)); @@ -294,7 +294,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD nestedComparatorSource = createFieldComparator("field2", sortMode, 127, createNested(searcher, parentFilter, childFilter)); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort); - assertThat(topDocs.totalHits, equalTo(8)); + assertThat(topDocs.totalHits, equalTo(8L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(19)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(127)); @@ -310,7 +310,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter)); sort = new Sort(new SortField("field2", nestedComparatorSource)); topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort); - assertThat(topDocs.totalHits, equalTo(8)); + assertThat(topDocs.totalHits, equalTo(8L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(19)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-127)); @@ -336,7 +336,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7)); + assertThat(topDocs.totalHits, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(11)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java index 1dc982270f7..c643ea6cee0 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java @@ -69,7 +69,7 @@ public class DoubleNestedSortingTests extends AbstractNumberNestedSortingTestCas Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new 
SortField("field2", nestedComparatorSource)); TopDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7)); + assertThat(topDocs.totalHits, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(11)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java index 4262b959099..13d0e83e37e 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java @@ -68,7 +68,7 @@ public class FloatNestedSortingTests extends DoubleNestedSortingTests { Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7)); + assertThat(topDocs.totalHits, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(11)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index c9c48a9f969..823ccf91d30 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -285,7 +285,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopFieldDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7)); + assertThat(topDocs.totalHits, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(3)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("a")); @@ -302,7 +302,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter)); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7)); + assertThat(topDocs.totalHits, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(28)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("o")); @@ -328,7 +328,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { ); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(6)); + assertThat(topDocs.totalHits, equalTo(6L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(23)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("m")); diff --git a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java 
b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java index 61eb4581328..0eee4eb8a44 100644 --- a/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java +++ b/core/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointTrackerTests.java @@ -19,9 +19,13 @@ package org.elasticsearch.index.seqno; +import com.carrotsearch.hppc.ObjectLongHashMap; +import com.carrotsearch.hppc.ObjectLongMap; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.index.shard.PrimaryContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -29,7 +33,6 @@ import org.junit.Before; import java.util.Arrays; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -43,11 +46,15 @@ import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; +import java.util.stream.StreamSupport; import static org.elasticsearch.index.seqno.SequenceNumbersService.UNASSIGNED_SEQ_NO; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; public class GlobalCheckpointTrackerTests extends ESTestCase { @@ -79,6 +86,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { } public void testGlobalCheckpointUpdate() { + final long initialClusterStateVersion = randomNonNegativeLong(); Map allocations = new HashMap<>(); Map activeWithCheckpoints = randomAllocationsWithLocalCheckpoints(0, 5); Set active = new HashSet<>(activeWithCheckpoints.keySet()); @@ -107,7 +115,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { logger.info(" - [{}], local checkpoint [{}], [{}]", aId, allocations.get(aId), type); }); - tracker.updateAllocationIdsFromMaster(active, initializing); + tracker.updateAllocationIdsFromMaster(initialClusterStateVersion, active, initializing); initializing.forEach(aId -> markAllocationIdAsInSyncQuietly(tracker, aId, tracker.getGlobalCheckpoint())); allocations.keySet().forEach(aId -> tracker.updateLocalCheckpoint(aId, allocations.get(aId))); @@ -130,7 +138,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { Set newActive = new HashSet<>(active); newActive.add(extraId); - tracker.updateAllocationIdsFromMaster(newActive, initializing); + tracker.updateAllocationIdsFromMaster(initialClusterStateVersion + 1, newActive, initializing); // now notify for the new id tracker.updateLocalCheckpoint(extraId, minLocalCheckpointAfterUpdates + 1 + randomInt(4)); @@ -146,6 +154,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { assigned.putAll(active); assigned.putAll(initializing); tracker.updateAllocationIdsFromMaster( + randomNonNegativeLong(), active.keySet(), initializing.keySet()); randomSubsetOf(initializing.keySet()).forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k, tracker.getGlobalCheckpoint())); @@ -166,7 +175,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { public void testMissingInSyncIdsPreventAdvance() { final Map active = randomAllocationsWithLocalCheckpoints(0, 
5); final Map initializing = randomAllocationsWithLocalCheckpoints(1, 5); - tracker.updateAllocationIdsFromMaster(active.keySet(), initializing.keySet()); + tracker.updateAllocationIdsFromMaster(randomNonNegativeLong(), active.keySet(), initializing.keySet()); initializing.keySet().forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k, tracker.getGlobalCheckpoint())); randomSubsetOf(randomInt(initializing.size() - 1), initializing.keySet()).forEach(aId -> tracker.updateLocalCheckpoint(aId, initializing.get(aId))); @@ -184,7 +193,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { final Map active = randomAllocationsWithLocalCheckpoints(1, 5); final Map initializing = randomAllocationsWithLocalCheckpoints(1, 5); final Map nonApproved = randomAllocationsWithLocalCheckpoints(1, 5); - tracker.updateAllocationIdsFromMaster(active.keySet(), initializing.keySet()); + tracker.updateAllocationIdsFromMaster(randomNonNegativeLong(), active.keySet(), initializing.keySet()); initializing.keySet().forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k, tracker.getGlobalCheckpoint())); nonApproved.keySet().forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k, tracker.getGlobalCheckpoint())); @@ -196,6 +205,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { } public void testInSyncIdsAreRemovedIfNotValidatedByMaster() { + final long initialClusterStateVersion = randomNonNegativeLong(); final Map activeToStay = randomAllocationsWithLocalCheckpoints(1, 5); final Map initializingToStay = randomAllocationsWithLocalCheckpoints(1, 5); final Map activeToBeRemoved = randomAllocationsWithLocalCheckpoints(1, 5); @@ -211,7 +221,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { if (randomBoolean()) { allocations.putAll(initializingToBeRemoved); } - tracker.updateAllocationIdsFromMaster(active, initializing); + tracker.updateAllocationIdsFromMaster(initialClusterStateVersion, active, initializing); if (randomBoolean()) { initializingToStay.keySet().forEach(k -> markAllocationIdAsInSyncQuietly(tracker, k, tracker.getGlobalCheckpoint())); } else { @@ -223,11 +233,11 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { // now remove shards if (randomBoolean()) { - tracker.updateAllocationIdsFromMaster(activeToStay.keySet(), initializingToStay.keySet()); + tracker.updateAllocationIdsFromMaster(initialClusterStateVersion + 1, activeToStay.keySet(), initializingToStay.keySet()); allocations.forEach((aid, ckp) -> tracker.updateLocalCheckpoint(aid, ckp + 10L)); } else { allocations.forEach((aid, ckp) -> tracker.updateLocalCheckpoint(aid, ckp + 10L)); - tracker.updateAllocationIdsFromMaster(activeToStay.keySet(), initializingToStay.keySet()); + tracker.updateAllocationIdsFromMaster(initialClusterStateVersion + 2, activeToStay.keySet(), initializingToStay.keySet()); } final long checkpoint = Stream.concat(activeToStay.values().stream(), initializingToStay.values().stream()) @@ -243,7 +253,8 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { final AtomicBoolean complete = new AtomicBoolean(); final String inSyncAllocationId =randomAlphaOfLength(16); final String trackingAllocationId = randomAlphaOfLength(16); - tracker.updateAllocationIdsFromMaster(Collections.singleton(inSyncAllocationId), Collections.singleton(trackingAllocationId)); + tracker.updateAllocationIdsFromMaster( + randomNonNegativeLong(), Collections.singleton(inSyncAllocationId), Collections.singleton(trackingAllocationId)); tracker.updateLocalCheckpoint(inSyncAllocationId, 
globalCheckpoint); final Thread thread = new Thread(() -> { try { @@ -291,7 +302,8 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { final AtomicBoolean interrupted = new AtomicBoolean(); final String inSyncAllocationId = randomAlphaOfLength(16); final String trackingAllocationId = randomAlphaOfLength(32); - tracker.updateAllocationIdsFromMaster(Collections.singleton(inSyncAllocationId), Collections.singleton(trackingAllocationId)); + tracker.updateAllocationIdsFromMaster( + randomNonNegativeLong(), Collections.singleton(inSyncAllocationId), Collections.singleton(trackingAllocationId)); tracker.updateLocalCheckpoint(inSyncAllocationId, globalCheckpoint); final Thread thread = new Thread(() -> { try { @@ -329,21 +341,14 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { } public void testUpdateAllocationIdsFromMaster() throws Exception { + final long initialClusterStateVersion = randomNonNegativeLong(); final int numberOfActiveAllocationsIds = randomIntBetween(2, 16); - final Set activeAllocationIds = - IntStream.range(0, numberOfActiveAllocationsIds).mapToObj(i -> randomAlphaOfLength(16)).collect(Collectors.toSet()); final int numberOfInitializingIds = randomIntBetween(2, 16); - final Set initializingIds = - IntStream.range(0, numberOfInitializingIds).mapToObj(i -> { - do { - final String initializingId = randomAlphaOfLength(16); - // ensure we do not duplicate an allocation ID in active and initializing sets - if (!activeAllocationIds.contains(initializingId)) { - return initializingId; - } - } while (true); - }).collect(Collectors.toSet()); - tracker.updateAllocationIdsFromMaster(activeAllocationIds, initializingIds); + final Tuple, Set> activeAndInitializingAllocationIds = + randomActiveAndInitializingAllocationIds(numberOfActiveAllocationsIds, numberOfInitializingIds); + final Set activeAllocationIds = activeAndInitializingAllocationIds.v1(); + final Set initializingIds = activeAndInitializingAllocationIds.v2(); + tracker.updateAllocationIdsFromMaster(initialClusterStateVersion, activeAllocationIds, initializingIds); // first we assert that the in-sync and tracking sets are set up correctly assertTrue(activeAllocationIds.stream().allMatch(a -> tracker.inSyncLocalCheckpoints.containsKey(a))); @@ -364,7 +369,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { final List removingInitializingAllocationIds = randomSubsetOf(initializingIds); final Set newInitializingAllocationIds = initializingIds.stream().filter(a -> !removingInitializingAllocationIds.contains(a)).collect(Collectors.toSet()); - tracker.updateAllocationIdsFromMaster(newActiveAllocationIds, newInitializingAllocationIds); + tracker.updateAllocationIdsFromMaster(initialClusterStateVersion + 1, newActiveAllocationIds, newInitializingAllocationIds); assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.inSyncLocalCheckpoints.containsKey(a))); assertTrue(removingActiveAllocationIds.stream().noneMatch(a -> tracker.inSyncLocalCheckpoints.containsKey(a))); assertTrue(newInitializingAllocationIds.stream().allMatch(a -> tracker.trackingLocalCheckpoints.containsKey(a))); @@ -376,7 +381,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { */ newActiveAllocationIds.add(randomAlphaOfLength(32)); newInitializingAllocationIds.add(randomAlphaOfLength(64)); - tracker.updateAllocationIdsFromMaster(newActiveAllocationIds, newInitializingAllocationIds); + tracker.updateAllocationIdsFromMaster(initialClusterStateVersion + 2, newActiveAllocationIds, 
newInitializingAllocationIds); assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.inSyncLocalCheckpoints.containsKey(a))); assertTrue( newActiveAllocationIds @@ -416,7 +421,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { // using a different length than we have been using above ensures that we can not collide with a previous allocation ID final String newSyncingAllocationId = randomAlphaOfLength(128); newInitializingAllocationIds.add(newSyncingAllocationId); - tracker.updateAllocationIdsFromMaster(newActiveAllocationIds, newInitializingAllocationIds); + tracker.updateAllocationIdsFromMaster(initialClusterStateVersion + 3, newActiveAllocationIds, newInitializingAllocationIds); final CyclicBarrier barrier = new CyclicBarrier(2); final Thread thread = new Thread(() -> { try { @@ -450,7 +455,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { * the in-sync set even if we receive a cluster state update that does not reflect this. * */ - tracker.updateAllocationIdsFromMaster(newActiveAllocationIds, newInitializingAllocationIds); + tracker.updateAllocationIdsFromMaster(initialClusterStateVersion + 4, newActiveAllocationIds, newInitializingAllocationIds); assertFalse(tracker.trackingLocalCheckpoints.containsKey(newSyncingAllocationId)); assertTrue(tracker.inSyncLocalCheckpoints.containsKey(newSyncingAllocationId)); } @@ -471,7 +476,7 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { final String active = randomAlphaOfLength(16); final String initializing = randomAlphaOfLength(32); - tracker.updateAllocationIdsFromMaster(Collections.singleton(active), Collections.singleton(initializing)); + tracker.updateAllocationIdsFromMaster(randomNonNegativeLong(), Collections.singleton(active), Collections.singleton(initializing)); final CyclicBarrier barrier = new CyclicBarrier(4); @@ -516,7 +521,216 @@ public class GlobalCheckpointTrackerTests extends ESTestCase { markingThread.join(); assertThat(tracker.getGlobalCheckpoint(), equalTo((long) nextActiveLocalCheckpoint)); + } + public void testPrimaryContextOlderThanAppliedClusterState() { + final long initialClusterStateVersion = randomIntBetween(0, Integer.MAX_VALUE - 1) + 1; + final int numberOfActiveAllocationsIds = randomIntBetween(0, 8); + final int numberOfInitializingIds = randomIntBetween(0, 8); + final Tuple, Set> activeAndInitializingAllocationIds = + randomActiveAndInitializingAllocationIds(numberOfActiveAllocationsIds, numberOfInitializingIds); + final Set activeAllocationIds = activeAndInitializingAllocationIds.v1(); + final Set initializingAllocationIds = activeAndInitializingAllocationIds.v2(); + tracker.updateAllocationIdsFromMaster(initialClusterStateVersion, activeAllocationIds, initializingAllocationIds); + + /* + * We are going to establish a primary context from a cluster state version older than the applied cluster state version on the + * tracker. Because of recovery barriers established during relocation handoff, we know that the set of active allocation IDs in the + * newer cluster state is a superset of the allocation IDs in the applied cluster state with the caveat that an existing + * initializing allocation ID could have moved to an in-sync allocation ID within the tracker due to recovery finalization, and the + * set of initializing allocation IDs is otherwise arbitrary. 
+ */ + final int numberOfAdditionalInitializingAllocationIds = randomIntBetween(0, 8); + final Set<String> initializedAllocationIds = new HashSet<>(randomSubsetOf(initializingAllocationIds)); + final Set<String> newInitializingAllocationIds = + randomAllocationIdsExcludingExistingIds( + Sets.union(activeAllocationIds, initializingAllocationIds), numberOfAdditionalInitializingAllocationIds); + final Set<String> contextInitializingIds = Sets.union( + new HashSet<>(randomSubsetOf(Sets.difference(initializingAllocationIds, initializedAllocationIds))), + newInitializingAllocationIds); + + final int numberOfAdditionalActiveAllocationIds = randomIntBetween(0, 8); + final Set<String> contextActiveAllocationIds = Sets.union( + Sets.union( + activeAllocationIds, + randomAllocationIdsExcludingExistingIds(activeAllocationIds, numberOfAdditionalActiveAllocationIds)), + initializedAllocationIds); + + final ObjectLongMap<String> activeAllocationIdsLocalCheckpoints = new ObjectLongHashMap<>(); + for (final String allocationId : contextActiveAllocationIds) { + activeAllocationIdsLocalCheckpoints.put(allocationId, randomNonNegativeLong()); + } + final ObjectLongMap<String> initializingAllocationIdsLocalCheckpoints = new ObjectLongHashMap<>(); + for (final String allocationId : contextInitializingIds) { + initializingAllocationIdsLocalCheckpoints.put(allocationId, randomNonNegativeLong()); + } + + final PrimaryContext primaryContext = new PrimaryContext( + initialClusterStateVersion - randomIntBetween(0, Math.toIntExact(initialClusterStateVersion) - 1), + activeAllocationIdsLocalCheckpoints, + initializingAllocationIdsLocalCheckpoints); + + tracker.updateAllocationIdsFromPrimaryContext(primaryContext); + + // the primary context carries an older cluster state version + assertThat(tracker.appliedClusterStateVersion, equalTo(initialClusterStateVersion)); + + // only existing active allocation IDs and initializing allocation IDs that moved to initialized should be in-sync + assertThat( + Sets.union(activeAllocationIds, initializedAllocationIds), + equalTo( + StreamSupport + .stream(tracker.inSyncLocalCheckpoints.keys().spliterator(), false) + .map(e -> e.value) + .collect(Collectors.toSet()))); + + // the local checkpoints known to the tracker for in-sync shards should match what is known in the primary context + for (final String allocationId : Sets.union(activeAllocationIds, initializedAllocationIds)) { + assertThat( + tracker.inSyncLocalCheckpoints.get(allocationId), equalTo(primaryContext.inSyncLocalCheckpoints().get(allocationId))); + } + + // only existing initializing allocation IDs that did not move to initialized should be tracked + assertThat( + Sets.difference(initializingAllocationIds, initializedAllocationIds), + equalTo( + StreamSupport + .stream(tracker.trackingLocalCheckpoints.keys().spliterator(), false) + .map(e -> e.value) + .collect(Collectors.toSet()))); + + // the local checkpoints known to the tracker for initializing shards should match what is known in the primary context + for (final String allocationId : Sets.difference(initializingAllocationIds, initializedAllocationIds)) { + if (primaryContext.trackingLocalCheckpoints().containsKey(allocationId)) { + assertThat( + tracker.trackingLocalCheckpoints.get(allocationId), + equalTo(primaryContext.trackingLocalCheckpoints().get(allocationId))); + } else { + assertThat(tracker.trackingLocalCheckpoints.get(allocationId), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO)); + } + } + + // the global checkpoint can only be computed from active allocation IDs and initializing allocation IDs
that moved to initialized + final long globalCheckpoint = + StreamSupport + .stream(activeAllocationIdsLocalCheckpoints.spliterator(), false) + .filter(e -> tracker.inSyncLocalCheckpoints.containsKey(e.key) || initializedAllocationIds.contains(e.key)) + .mapToLong(e -> e.value) + .min() + .orElse(SequenceNumbersService.UNASSIGNED_SEQ_NO); + assertThat(tracker.getGlobalCheckpoint(), equalTo(globalCheckpoint)); + } + + public void testPrimaryContextNewerThanAppliedClusterState() { + final long initialClusterStateVersion = randomIntBetween(0, Integer.MAX_VALUE); + final int numberOfActiveAllocationsIds = randomIntBetween(0, 8); + final int numberOfInitializingIds = randomIntBetween(0, 8); + final Tuple<Set<String>, Set<String>> activeAndInitializingAllocationIds = + randomActiveAndInitializingAllocationIds(numberOfActiveAllocationsIds, numberOfInitializingIds); + final Set<String> activeAllocationIds = activeAndInitializingAllocationIds.v1(); + final Set<String> initializingAllocationIds = activeAndInitializingAllocationIds.v2(); + tracker.updateAllocationIdsFromMaster(initialClusterStateVersion, activeAllocationIds, initializingAllocationIds); + + /* + * We are going to establish a primary context from a cluster state version newer than the applied cluster state version on the + * tracker. Because of recovery barriers established during relocation handoff, we know that the set of active allocation IDs in the + * newer cluster state is a subset of the allocation IDs in the applied cluster state with the caveat that an existing initializing + * allocation ID could have moved to an in-sync allocation ID within the tracker due to recovery finalization, and the set of + * initializing allocation IDs is otherwise arbitrary. + */ + final int numberOfNewInitializingAllocationIds = randomIntBetween(0, 8); + final Set<String> initializedAllocationIds = new HashSet<>(randomSubsetOf(initializingAllocationIds)); + final Set<String> newInitializingAllocationIds = + randomAllocationIdsExcludingExistingIds( + Sets.union(activeAllocationIds, initializingAllocationIds), numberOfNewInitializingAllocationIds); + + final ObjectLongMap<String> activeAllocationIdsLocalCheckpoints = new ObjectLongHashMap<>(); + for (final String allocationId : Sets.union(new HashSet<>(randomSubsetOf(activeAllocationIds)), initializedAllocationIds)) { + activeAllocationIdsLocalCheckpoints.put(allocationId, randomNonNegativeLong()); + } + final ObjectLongMap<String> initializingIdsLocalCheckpoints = new ObjectLongHashMap<>(); + final Set<String> contextInitializingAllocationIds = Sets.union( + new HashSet<>(randomSubsetOf(Sets.difference(initializingAllocationIds, initializedAllocationIds))), + newInitializingAllocationIds); + for (final String allocationId : contextInitializingAllocationIds) { + initializingIdsLocalCheckpoints.put(allocationId, randomNonNegativeLong()); + } + + final PrimaryContext primaryContext = + new PrimaryContext( + initialClusterStateVersion + randomIntBetween(0, Integer.MAX_VALUE) + 1, + activeAllocationIdsLocalCheckpoints, + initializingIdsLocalCheckpoints); + + tracker.updateAllocationIdsFromPrimaryContext(primaryContext); + + final PrimaryContext trackerPrimaryContext = tracker.primaryContext(); + try { + assertTrue(tracker.sealed()); + final long globalCheckpoint = + StreamSupport + .stream(activeAllocationIdsLocalCheckpoints.values().spliterator(), false) + .mapToLong(e -> e.value) + .min() + .orElse(SequenceNumbersService.UNASSIGNED_SEQ_NO); + + // the primary context contains knowledge of the state of the entire universe + assertThat(primaryContext.clusterStateVersion(),
equalTo(trackerPrimaryContext.clusterStateVersion())); + assertThat(primaryContext.inSyncLocalCheckpoints(), equalTo(trackerPrimaryContext.inSyncLocalCheckpoints())); + assertThat(primaryContext.trackingLocalCheckpoints(), equalTo(trackerPrimaryContext.trackingLocalCheckpoints())); + assertThat(tracker.getGlobalCheckpoint(), equalTo(globalCheckpoint)); + } finally { + tracker.releasePrimaryContext(); + assertFalse(tracker.sealed()); + } + } + + public void testPrimaryContextSealing() { + // the tracker should start in the state of not being sealed + assertFalse(tracker.sealed()); + + // sampling the primary context should seal the tracker + tracker.primaryContext(); + assertTrue(tracker.sealed()); + + /* + * Invoking methods that mutates the state of the tracker should fail (with the exception of updating allocation IDs and updating + * global checkpoint on replica which can happen on the relocation source). + */ + assertIllegalStateExceptionWhenSealed(() -> tracker.updateLocalCheckpoint(randomAlphaOfLength(16), randomNonNegativeLong())); + assertIllegalStateExceptionWhenSealed(() -> tracker.updateAllocationIdsFromPrimaryContext(mock(PrimaryContext.class))); + assertIllegalStateExceptionWhenSealed(() -> tracker.primaryContext()); + assertIllegalStateExceptionWhenSealed(() -> tracker.markAllocationIdAsInSync(randomAlphaOfLength(16), randomNonNegativeLong())); + + // closing the releasable should unseal the tracker + tracker.releasePrimaryContext(); + assertFalse(tracker.sealed()); + } + + private void assertIllegalStateExceptionWhenSealed(final ThrowingRunnable runnable) { + final IllegalStateException e = expectThrows(IllegalStateException.class, runnable); + assertThat(e, hasToString(containsString("global checkpoint tracker is sealed"))); + } + + private Tuple, Set> randomActiveAndInitializingAllocationIds( + final int numberOfActiveAllocationsIds, + final int numberOfInitializingIds) { + final Set activeAllocationIds = + IntStream.range(0, numberOfActiveAllocationsIds).mapToObj(i -> randomAlphaOfLength(16) + i).collect(Collectors.toSet()); + final Set initializingIds = randomAllocationIdsExcludingExistingIds(activeAllocationIds, numberOfInitializingIds); + return Tuple.tuple(activeAllocationIds, initializingIds); + } + + private Set randomAllocationIdsExcludingExistingIds(final Set existingAllocationIds, final int numberOfAllocationIds) { + return IntStream.range(0, numberOfAllocationIds).mapToObj(i -> { + do { + final String newAllocationId = randomAlphaOfLength(16); + // ensure we do not duplicate an allocation ID + if (!existingAllocationIds.contains(newAllocationId)) { + return newAllocationId + i; + } + } while (true); + }).collect(Collectors.toSet()); } private void markAllocationIdAsInSyncQuietly( diff --git a/core/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java b/core/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java index 74183670ecb..3d280b4d28c 100644 --- a/core/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java +++ b/core/src/test/java/org/elasticsearch/index/seqno/LocalCheckpointTrackerTests.java @@ -45,16 +45,9 @@ public class LocalCheckpointTrackerTests extends ESTestCase { private LocalCheckpointTracker tracker; - private final int SMALL_CHUNK_SIZE = 4; + private static final int SMALL_CHUNK_SIZE = 4; - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - tracker = getTracker(); - } - - private LocalCheckpointTracker getTracker() { + public static 
LocalCheckpointTracker createEmptyTracker() { return new LocalCheckpointTracker( IndexSettingsModule.newIndexSettings( "test", @@ -67,6 +60,13 @@ public class LocalCheckpointTrackerTests extends ESTestCase { ); } + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + tracker = createEmptyTracker(); + } + public void testSimplePrimary() { long seqNo1, seqNo2; assertThat(tracker.getCheckpoint(), equalTo(SequenceNumbersService.NO_OPS_PERFORMED)); @@ -236,5 +236,4 @@ public class LocalCheckpointTrackerTests extends ESTestCase { thread.join(); } - } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index a5e5ecd8aa6..0c07f4cf770 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -20,12 +20,12 @@ package org.elasticsearch.index.shard; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; -import org.apache.lucene.index.Term; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterInfoService; @@ -39,8 +39,10 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -51,6 +53,7 @@ import org.elasticsearch.env.ShardLock; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -58,7 +61,7 @@ import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SeqNoFieldMapper; -import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; @@ -83,7 +86,6 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; -import java.util.function.Supplier; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -343,39 +345,34 @@ public class IndexShardIT extends ESSingleNodeTestCase { client().prepareIndex("test", "test", "0") .setSource("{}", 
XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); assertFalse(shard.shouldFlush()); - ParsedDocument doc = testParsedDocument( - "1", - "test", - null, - SequenceNumbersService.UNASSIGNED_SEQ_NO, - new ParseContext.Document(), - new BytesArray(new byte[]{1}), XContentType.JSON, null); - Engine.Index index = new Engine.Index(new Term("_id", doc.id()), doc); - shard.index(index); + shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, + SourceToParse.source("test", "test", "1", new BytesArray("{}"), XContentType.JSON), + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); assertTrue(shard.shouldFlush()); - assertEquals(2, shard.getEngine().getTranslog().totalOperations()); + final Translog translog = shard.getEngine().getTranslog(); + assertEquals(2, translog.uncommittedOperations()); client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON) .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); assertBusy(() -> { // this is async assertFalse(shard.shouldFlush()); }); - assertEquals(0, shard.getEngine().getTranslog().totalOperations()); - shard.getEngine().getTranslog().sync(); - long size = shard.getEngine().getTranslog().sizeInBytes(); - logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), - shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration()); + assertEquals(0, translog.uncommittedOperations()); + translog.sync(); + long size = translog.uncommittedSizeInBytes(); + logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), + translog.uncommittedOperations(), translog.getGeneration()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put( IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES)) .build()).get(); client().prepareDelete("test", "test", "2").get(); - logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), - shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration()); + logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), + translog.uncommittedOperations(), translog.getGeneration()); assertBusy(() -> { // this is async - logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), - shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration()); + logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), + translog.uncommittedOperations(), translog.getGeneration()); assertFalse(shard.shouldFlush()); }); - assertEquals(0, shard.getEngine().getTranslog().totalOperations()); + assertEquals(0, translog.uncommittedOperations()); } public void testMaybeRollTranslogGeneration() throws Exception { @@ -398,15 +395,9 @@ public class IndexShardIT extends ESSingleNodeTestCase { final int numberOfDocuments = randomIntBetween(32, 128); for (int i = 0; i < numberOfDocuments; i++) { assertThat(translog.currentFileGeneration(), equalTo(generation + rolls)); - final ParsedDocument doc = testParsedDocument( - "1", - "test", - null, - SequenceNumbersService.UNASSIGNED_SEQ_NO, - new ParseContext.Document(), - new 
BytesArray(new byte[]{1}), XContentType.JSON, null); - final Engine.Index index = new Engine.Index(new Term("_id", doc.id()), doc); - final Engine.IndexResult result = shard.index(index); + final Engine.IndexResult result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, + SourceToParse.source("test", "test", "1", new BytesArray("{}"), XContentType.JSON), + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); final Translog.Location location = result.getTranslogLocation(); shard.afterWriteOperation(); if (location.translogLocation + location.size > generationThreshold) { @@ -458,7 +449,7 @@ public class IndexShardIT extends ESSingleNodeTestCase { threads[i].start(); } barrier.await(); - final Runnable check; + final CheckedRunnable check; if (flush) { final FlushStats flushStats = shard.flushStats(); final long total = flushStats.getTotal(); @@ -534,16 +525,16 @@ public class IndexShardIT extends ESSingleNodeTestCase { } - public static final IndexShard recoverShard(IndexShard newShard) throws IOException { + public static final IndexShard recoverShard(IndexShard newShard) throws IOException { DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue(newShard.recoverFromStore()); - newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); + IndexShardTestCase.updateRoutingEntry(newShard, newShard.routingEntry().moveToStarted()); return newShard; } - public static final IndexShard newIndexShard(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper, - IndexingOperationListener... listeners) throws IOException { + public static final IndexShard newIndexShard(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper, + IndexingOperationListener... 
listeners) throws IOException { ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry()); IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(), shard.store(), indexService.getIndexSortSupplier(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 5072e7a3b89..9093274a491 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -61,7 +61,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -133,9 +132,6 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.common.lucene.Lucene.cleanLuceneIndex; import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.VersionType.EXTERNAL; -import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; -import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; import static org.elasticsearch.repositories.RepositoryData.EMPTY_REPO_GEN; import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.containsString; @@ -196,7 +192,7 @@ public class IndexShardTests extends IndexShardTestCase { ShardStateMetaData shardStateMetaData = load(logger, shardStatePath); assertEquals(getShardStateMetadata(shard), shardStateMetaData); ShardRouting routing = shard.shardRouting; - shard.updateRoutingEntry(routing); + IndexShardTestCase.updateRoutingEntry(shard, routing); shardStateMetaData = load(logger, shardStatePath); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); @@ -204,7 +200,7 @@ public class IndexShardTests extends IndexShardTestCase { new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId())); routing = TestShardRouting.relocate(shard.shardRouting, "some node", 42L); - shard.updateRoutingEntry(routing); + IndexShardTestCase.updateRoutingEntry(shard, routing); shardStateMetaData = load(logger, shardStatePath); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); assertEquals(shardStateMetaData, @@ -279,13 +275,22 @@ public class IndexShardTests extends IndexShardTestCase { // expected } try { - indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm(), null, ThreadPool.Names.INDEX); + indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm(), SequenceNumbersService.UNASSIGNED_SEQ_NO, null, + ThreadPool.Names.INDEX); fail("we should not be able to increment anymore"); } catch (IndexShardClosedException e) { // expected } } + public void testRejectOperationPermitWithHigherTermWhenNotStarted() throws IOException { + IndexShard indexShard = newShard(false); + expectThrows(IndexShardNotStartedException.class, () -> + 
indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm() + randomIntBetween(1, 100), + SequenceNumbersService.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.INDEX)); + closeShards(indexShard); + } + public void testPrimaryPromotionDelaysOperations() throws IOException, BrokenBarrierException, InterruptedException { final IndexShard indexShard = newStartedShard(false); @@ -303,6 +308,7 @@ public class IndexShardTests extends IndexShardTestCase { } indexShard.acquireReplicaOperationPermit( indexShard.getPrimaryTerm(), + indexShard.getGlobalCheckpoint(), new ActionListener() { @Override public void onResponse(Releasable releasable) { @@ -339,8 +345,8 @@ public class IndexShardTests extends IndexShardTestCase { true, ShardRoutingState.STARTED, replicaRouting.allocationId()); - indexShard.updateRoutingEntry(primaryRouting); - indexShard.updatePrimaryTerm(indexShard.getPrimaryTerm() + 1); + indexShard.updateShardState(primaryRouting, indexShard.getPrimaryTerm() + 1, (shard, listener) -> {}, + 0L, Collections.emptySet(), Collections.emptySet()); final int delayedOperations = scaledRandomIntBetween(1, 64); final CyclicBarrier delayedOperationsBarrier = new CyclicBarrier(1 + delayedOperations); @@ -402,13 +408,13 @@ public class IndexShardTests extends IndexShardTestCase { int max = Math.toIntExact(SequenceNumbersService.NO_OPS_PERFORMED); boolean gap = false; for (int i = 0; i < operations; i++) { - final String id = Integer.toString(i); - final ParsedDocument doc = testParsedDocument(id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null); if (!rarely()) { - final Term uid = new Term("_id", doc.id()); - final Engine.Index index = - new Engine.Index(uid, doc, i, indexShard.getPrimaryTerm(), 1, EXTERNAL, REPLICA, System.nanoTime(), -1, false); - indexShard.index(index); + final String id = Integer.toString(i); + SourceToParse sourceToParse = SourceToParse.source(indexShard.shardId().getIndexName(), "test", id, + new BytesArray("{}"), XContentType.JSON); + indexShard.applyIndexOperationOnReplica(i, indexShard.getPrimaryTerm(), + 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse, + getMappingUpdater(indexShard, sourceToParse.type())); max = i; } else { gap = true; @@ -430,8 +436,8 @@ public class IndexShardTests extends IndexShardTestCase { true, ShardRoutingState.STARTED, replicaRouting.allocationId()); - indexShard.updateRoutingEntry(primaryRouting); - indexShard.updatePrimaryTerm(indexShard.getPrimaryTerm() + 1); + indexShard.updateShardState(primaryRouting, indexShard.getPrimaryTerm() + 1, (shard, listener) -> {}, + 0L, Collections.emptySet(), Collections.emptySet()); /* * This operation completing means that the delay operation executed as part of increasing the primary term has completed and the @@ -472,8 +478,8 @@ public class IndexShardTests extends IndexShardTestCase { ShardRouting replicaRouting = indexShard.routingEntry(); ShardRouting primaryRouting = TestShardRouting.newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), null, true, ShardRoutingState.STARTED, replicaRouting.allocationId()); - indexShard.updateRoutingEntry(primaryRouting); - indexShard.updatePrimaryTerm(indexShard.getPrimaryTerm() + 1); + indexShard.updateShardState(primaryRouting, indexShard.getPrimaryTerm() + 1, (shard, listener) -> {}, + 0L, Collections.emptySet(), Collections.emptySet()); } else { indexShard = newStartedShard(true); } @@ -481,7 +487,7 @@ public class IndexShardTests extends IndexShardTestCase { assertEquals(0, 
indexShard.getActiveOperationsCount()); if (indexShard.routingEntry().isRelocationTarget() == false) { try { - indexShard.acquireReplicaOperationPermit(primaryTerm, null, ThreadPool.Names.INDEX); + indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), null, ThreadPool.Names.INDEX); fail("shard shouldn't accept operations as replica"); } catch (IllegalStateException ignored) { @@ -507,11 +513,11 @@ public class IndexShardTests extends IndexShardTestCase { private Releasable acquireReplicaOperationPermitBlockingly(IndexShard indexShard, long opPrimaryTerm) throws ExecutionException, InterruptedException { PlainActionFuture fut = new PlainActionFuture<>(); - indexShard.acquireReplicaOperationPermit(opPrimaryTerm, fut, ThreadPool.Names.INDEX); + indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getGlobalCheckpoint(), fut, ThreadPool.Names.INDEX); return fut.get(); } - public void testOperationPermitOnReplicaShards() throws InterruptedException, ExecutionException, IOException, BrokenBarrierException { + public void testOperationPermitOnReplicaShards() throws Exception { final ShardId shardId = new ShardId("test", "_na_", 0); final IndexShard indexShard; final boolean engineClosed; @@ -539,8 +545,8 @@ public class IndexShardTests extends IndexShardTestCase { ShardRouting routing = indexShard.routingEntry(); routing = TestShardRouting.newShardRouting(routing.shardId(), routing.currentNodeId(), "otherNode", true, ShardRoutingState.RELOCATING, AllocationId.newRelocation(routing.allocationId())); - indexShard.updateRoutingEntry(routing); - indexShard.relocated("test"); + IndexShardTestCase.updateRoutingEntry(indexShard, routing); + indexShard.relocated("test", primaryContext -> {}); engineClosed = false; break; } @@ -555,16 +561,23 @@ public class IndexShardTests extends IndexShardTestCase { if (shardRouting.primary() == false) { final IllegalStateException e = expectThrows(IllegalStateException.class, () -> indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.INDEX)); - assertThat(e, hasToString(containsString("shard is not a primary"))); + assertThat(e, hasToString(containsString("shard " + shardRouting + " is not a primary"))); } final long primaryTerm = indexShard.getPrimaryTerm(); final long translogGen = engineClosed ? 
-1 : indexShard.getTranslog().getGeneration().translogFileGeneration; - final Releasable operation1 = acquireReplicaOperationPermitBlockingly(indexShard, primaryTerm); - assertEquals(1, indexShard.getActiveOperationsCount()); - final Releasable operation2 = acquireReplicaOperationPermitBlockingly(indexShard, primaryTerm); - assertEquals(2, indexShard.getActiveOperationsCount()); + final Releasable operation1; + final Releasable operation2; + if (engineClosed == false) { + operation1 = acquireReplicaOperationPermitBlockingly(indexShard, primaryTerm); + assertEquals(1, indexShard.getActiveOperationsCount()); + operation2 = acquireReplicaOperationPermitBlockingly(indexShard, primaryTerm); + assertEquals(2, indexShard.getActiveOperationsCount()); + } else { + operation1 = null; + operation2 = null; + } { final AtomicBoolean onResponse = new AtomicBoolean(); @@ -583,7 +596,8 @@ public class IndexShardTests extends IndexShardTestCase { } }; - indexShard.acquireReplicaOperationPermit(primaryTerm - 1, onLockAcquired, ThreadPool.Names.INDEX); + indexShard.acquireReplicaOperationPermit(primaryTerm - 1, SequenceNumbersService.UNASSIGNED_SEQ_NO, onLockAcquired, + ThreadPool.Names.INDEX); assertFalse(onResponse.get()); assertTrue(onFailure.get()); @@ -597,6 +611,21 @@ public class IndexShardTests extends IndexShardTestCase { final AtomicReference onFailure = new AtomicReference<>(); final CyclicBarrier barrier = new CyclicBarrier(2); final long newPrimaryTerm = primaryTerm + 1 + randomInt(20); + if (engineClosed == false) { + assertThat(indexShard.getLocalCheckpoint(), equalTo(SequenceNumbersService.NO_OPS_PERFORMED)); + assertThat(indexShard.getGlobalCheckpoint(), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO)); + } + final long newGlobalCheckPoint; + if (engineClosed || randomBoolean()) { + newGlobalCheckPoint = SequenceNumbersService.UNASSIGNED_SEQ_NO; + } else { + long localCheckPoint = indexShard.getGlobalCheckpoint() + randomInt(100); + // advance local checkpoint + for (int i = 0; i <= localCheckPoint; i++) { + indexShard.markSeqNoAsNoop(i, indexShard.getPrimaryTerm(), "dummy doc"); + } + newGlobalCheckPoint = randomIntBetween((int) indexShard.getGlobalCheckpoint(), (int) localCheckPoint); + } // but you can not increment with a new primary term until the operations on the older primary term complete final Thread thread = new Thread(() -> { try { @@ -604,55 +633,72 @@ public class IndexShardTests extends IndexShardTestCase { } catch (final BrokenBarrierException | InterruptedException e) { throw new RuntimeException(e); } - indexShard.acquireReplicaOperationPermit( - newPrimaryTerm, - new ActionListener() { - @Override - public void onResponse(Releasable releasable) { - assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm)); - onResponse.set(true); - releasable.close(); - finish(); - } + ActionListener listener = new ActionListener() { + @Override + public void onResponse(Releasable releasable) { + assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm)); + assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); + onResponse.set(true); + releasable.close(); + finish(); + } - @Override - public void onFailure(Exception e) { - onFailure.set(e); - finish(); - } + @Override + public void onFailure(Exception e) { + onFailure.set(e); + finish(); + } - private void finish() { - try { - barrier.await(); - } catch (final BrokenBarrierException | InterruptedException e) { - throw new RuntimeException(e); - } - } - }, + private void finish() { + try { + 
barrier.await(); + } catch (final BrokenBarrierException | InterruptedException e) { + throw new RuntimeException(e); + } + } + }; + try { + indexShard.acquireReplicaOperationPermit( + newPrimaryTerm, + newGlobalCheckPoint, + listener, ThreadPool.Names.SAME); + } catch (Exception e) { + listener.onFailure(e); + } }); thread.start(); barrier.await(); - // our operation should be blocked until the previous operations complete - assertFalse(onResponse.get()); - assertNull(onFailure.get()); - assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm)); - Releasables.close(operation1); - // our operation should still be blocked - assertFalse(onResponse.get()); - assertNull(onFailure.get()); - assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm)); - Releasables.close(operation2); - barrier.await(); - // now lock acquisition should have succeeded - assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm)); - if (engineClosed) { + if (indexShard.state() == IndexShardState.CREATED || indexShard.state() == IndexShardState.RECOVERING) { + barrier.await(); + assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm)); assertFalse(onResponse.get()); - assertThat(onFailure.get(), instanceOf(AlreadyClosedException.class)); + assertThat(onFailure.get(), instanceOf(IndexShardNotStartedException.class)); + Releasables.close(operation1); + Releasables.close(operation2); } else { - assertTrue(onResponse.get()); + // our operation should be blocked until the previous operations complete + assertFalse(onResponse.get()); assertNull(onFailure.get()); - assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(translogGen + 1)); + assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm)); + Releasables.close(operation1); + // our operation should still be blocked + assertFalse(onResponse.get()); + assertNull(onFailure.get()); + assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm)); + Releasables.close(operation2); + barrier.await(); + // now lock acquisition should have succeeded + assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm)); + if (engineClosed) { + assertFalse(onResponse.get()); + assertThat(onFailure.get(), instanceOf(AlreadyClosedException.class)); + } else { + assertTrue(onResponse.get()); + assertNull(onFailure.get()); + assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(translogGen + 1)); + assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); + } } thread.join(); assertEquals(0, indexShard.getActiveOperationsCount()); @@ -680,6 +726,7 @@ public class IndexShardTests extends IndexShardTestCase { } indexShard.acquireReplicaOperationPermit( primaryTerm + increment, + indexShard.getGlobalCheckpoint(), new ActionListener() { @Override public void onResponse(Releasable releasable) { @@ -783,7 +830,7 @@ public class IndexShardTests extends IndexShardTestCase { snapshot = newShard.snapshotStoreMetadata(); assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); - newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); + IndexShardTestCase.updateRoutingEntry(newShard, newShard.routingEntry().moveToStarted()); snapshot = newShard.snapshotStoreMetadata(); assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_2")); @@ -976,10 +1023,7 @@ public class IndexShardTests extends IndexShardTestCase { }); recoveryShardFromStore(shard); - ParsedDocument doc = testParsedDocument("1", "test", null, new ParseContext.Document(), - new BytesArray(new byte[]{1}), 
null); - Engine.Index index = new Engine.Index(new Term("_id", doc.id()), doc); - shard.index(index); + indexDoc(shard, "test", "1"); assertEquals(1, preIndex.get()); assertEquals(1, postIndexCreate.get()); assertEquals(0, postIndexUpdate.get()); @@ -988,7 +1032,7 @@ public class IndexShardTests extends IndexShardTestCase { assertEquals(0, postDelete.get()); assertEquals(0, postDeleteException.get()); - shard.index(index); + indexDoc(shard, "test", "1"); assertEquals(2, preIndex.get()); assertEquals(1, postIndexCreate.get()); assertEquals(1, postIndexUpdate.get()); @@ -997,8 +1041,7 @@ public class IndexShardTests extends IndexShardTestCase { assertEquals(0, postDelete.get()); assertEquals(0, postDeleteException.get()); - Engine.Delete delete = new Engine.Delete("test", "1", new Term("_id", doc.id())); - shard.delete(delete); + deleteDoc(shard, "test", "1"); assertEquals(2, preIndex.get()); assertEquals(1, postIndexCreate.get()); @@ -1012,7 +1055,7 @@ public class IndexShardTests extends IndexShardTestCase { shard.state = IndexShardState.STARTED; // It will generate exception try { - shard.index(index); + indexDoc(shard, "test", "1"); fail(); } catch (AlreadyClosedException e) { @@ -1026,7 +1069,7 @@ public class IndexShardTests extends IndexShardTestCase { assertEquals(1, postDelete.get()); assertEquals(0, postDeleteException.get()); try { - shard.delete(delete); + deleteDoc(shard, "test", "1"); fail(); } catch (AlreadyClosedException e) { @@ -1045,12 +1088,12 @@ public class IndexShardTests extends IndexShardTestCase { public void testLockingBeforeAndAfterRelocated() throws Exception { final IndexShard shard = newStartedShard(true); - shard.updateRoutingEntry(ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); + IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); CountDownLatch latch = new CountDownLatch(1); Thread recoveryThread = new Thread(() -> { latch.countDown(); try { - shard.relocated("simulated recovery"); + shard.relocated("simulated recovery", primaryContext -> {}); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -1076,10 +1119,10 @@ public class IndexShardTests extends IndexShardTestCase { public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { final IndexShard shard = newStartedShard(true); - shard.updateRoutingEntry(ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); + IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); Thread recoveryThread = new Thread(() -> { try { - shard.relocated("simulated recovery"); + shard.relocated("simulated recovery", primaryContext -> {}); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -1110,7 +1153,7 @@ public class IndexShardTests extends IndexShardTestCase { public void testStressRelocated() throws Exception { final IndexShard shard = newStartedShard(true); - shard.updateRoutingEntry(ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); + IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); final int numThreads = randomIntBetween(2, 4); Thread[] indexThreads = new Thread[numThreads]; CountDownLatch allPrimaryOperationLocksAcquired = new CountDownLatch(numThreads); @@ -1132,7 +1175,7 @@ public class IndexShardTests extends IndexShardTestCase { AtomicBoolean relocated = new AtomicBoolean(); final Thread recoveryThread = new Thread(() -> { try { - 
shard.relocated("simulated recovery"); + shard.relocated("simulated recovery", primaryContext -> {}); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -1165,25 +1208,26 @@ public class IndexShardTests extends IndexShardTestCase { public void testRelocatedShardCanNotBeRevived() throws IOException, InterruptedException { final IndexShard shard = newStartedShard(true); final ShardRouting originalRouting = shard.routingEntry(); - shard.updateRoutingEntry(ShardRoutingHelper.relocate(originalRouting, "other_node")); - shard.relocated("test"); - expectThrows(IllegalIndexShardStateException.class, () -> shard.updateRoutingEntry(originalRouting)); + IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(originalRouting, "other_node")); + shard.relocated("test", primaryContext -> {}); + expectThrows(IllegalIndexShardStateException.class, () -> IndexShardTestCase.updateRoutingEntry(shard, originalRouting)); closeShards(shard); } public void testShardCanNotBeMarkedAsRelocatedIfRelocationCancelled() throws IOException, InterruptedException { final IndexShard shard = newStartedShard(true); final ShardRouting originalRouting = shard.routingEntry(); - shard.updateRoutingEntry(ShardRoutingHelper.relocate(originalRouting, "other_node")); - shard.updateRoutingEntry(originalRouting); - expectThrows(IllegalIndexShardStateException.class, () -> shard.relocated("test")); + IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(originalRouting, "other_node")); + IndexShardTestCase.updateRoutingEntry(shard, originalRouting); + expectThrows(IllegalIndexShardStateException.class, () -> shard.relocated("test", primaryContext -> {})); closeShards(shard); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/25419") public void testRelocatedShardCanNotBeRevivedConcurrently() throws IOException, InterruptedException, BrokenBarrierException { final IndexShard shard = newStartedShard(true); final ShardRouting originalRouting = shard.routingEntry(); - shard.updateRoutingEntry(ShardRoutingHelper.relocate(originalRouting, "other_node")); + IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(originalRouting, "other_node")); CyclicBarrier cyclicBarrier = new CyclicBarrier(3); AtomicReference relocationException = new AtomicReference<>(); Thread relocationThread = new Thread(new AbstractRunnable() { @@ -1195,7 +1239,7 @@ public class IndexShardTests extends IndexShardTestCase { @Override protected void doRun() throws Exception { cyclicBarrier.await(); - shard.relocated("test"); + shard.relocated("test", primaryContext -> {}); } }); relocationThread.start(); @@ -1209,7 +1253,7 @@ public class IndexShardTests extends IndexShardTestCase { @Override protected void doRun() throws Exception { cyclicBarrier.await(); - shard.updateRoutingEntry(originalRouting); + IndexShardTestCase.updateRoutingEntry(shard, originalRouting); } }); cancellingThread.start(); @@ -1247,7 +1291,7 @@ public class IndexShardTests extends IndexShardTestCase { assertEquals(translogOps, newShard.recoveryState().getTranslog().totalOperations()); assertEquals(translogOps, newShard.recoveryState().getTranslog().totalOperationsOnStart()); assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f); - newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); + IndexShardTestCase.updateRoutingEntry(newShard, newShard.routingEntry().moveToStarted()); assertDocCount(newShard, 1); closeShards(newShard); } @@ -1256,14 +1300,14 @@ public 
class IndexShardTests extends IndexShardTestCase { public void testRecoverFromStoreWithNoOps() throws IOException { final IndexShard shard = newStartedShard(true); indexDoc(shard, "test", "0"); - Engine.Index test = indexDoc(shard, "test", "1"); + Engine.IndexResult test = indexDoc(shard, "test", "1"); // start a replica shard and index the second doc final IndexShard otherShard = newStartedShard(false); - test = otherShard.prepareIndexOnReplica( - SourceToParse.source(shard.shardId().getIndexName(), test.type(), test.id(), test.source(), - XContentType.JSON), - 1, 1, 1, EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); - otherShard.index(test); + updateMappings(otherShard, shard.indexSettings().getIndexMetaData()); + SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), "test", "1", + new BytesArray("{}"), XContentType.JSON); + otherShard.applyIndexOperationOnReplica(1, 1, 1, + VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse, update -> {}); final ShardRouting primaryShardRouting = shard.routingEntry(); IndexShard newShard = reinitShard(otherShard, ShardRoutingHelper.initWithSameId(primaryShardRouting, @@ -1286,7 +1330,7 @@ public class IndexShardTests extends IndexShardTestCase { } } assertEquals(1, numNoops); - newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); + IndexShardTestCase.updateRoutingEntry(newShard, newShard.routingEntry().moveToStarted()); assertDocCount(newShard, 1); assertDocCount(shard, 2); closeShards(newShard, shard); @@ -1310,7 +1354,7 @@ public class IndexShardTests extends IndexShardTestCase { assertEquals(0, newShard.recoveryState().getTranslog().totalOperations()); assertEquals(0, newShard.recoveryState().getTranslog().totalOperationsOnStart()); assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f); - newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); + IndexShardTestCase.updateRoutingEntry(newShard, newShard.routingEntry().moveToStarted()); assertDocCount(newShard, 0); closeShards(newShard); } @@ -1353,7 +1397,7 @@ public class IndexShardTests extends IndexShardTestCase { newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue("recover even if there is nothing to recover", newShard.recoverFromStore()); - newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); + IndexShardTestCase.updateRoutingEntry(newShard, newShard.routingEntry().moveToStarted()); assertDocCount(newShard, 0); // we can't issue this request through a client because of the inconsistencies we created with the cluster state // doing it directly instead @@ -1369,11 +1413,11 @@ public class IndexShardTests extends IndexShardTestCase { ShardRouting origRouting = shard.routingEntry(); assertThat(shard.state(), equalTo(IndexShardState.STARTED)); ShardRouting inRecoveryRouting = ShardRoutingHelper.relocate(origRouting, "some_node"); - shard.updateRoutingEntry(inRecoveryRouting); - shard.relocated("simulate mark as relocated"); + IndexShardTestCase.updateRoutingEntry(shard, inRecoveryRouting); + shard.relocated("simulate mark as relocated", primaryContext -> {}); assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); try { - shard.updateRoutingEntry(origRouting); + IndexShardTestCase.updateRoutingEntry(shard, origRouting); fail("Expected IndexShardRelocatedException"); } catch (IndexShardRelocatedException expected) { } @@ -1422,7 +1466,7 @@ public class IndexShardTests extends 
IndexShardTestCase { } })); - target.updateRoutingEntry(routing.moveToStarted()); + IndexShardTestCase.updateRoutingEntry(target, routing.moveToStarted()); assertDocs(target, "0"); closeShards(source, target); @@ -1682,6 +1726,7 @@ public class IndexShardTests extends IndexShardTestCase { null)); primary.recoverFromStore(); + primary.state = IndexShardState.RECOVERING; // translog recovery on the next line would otherwise fail as we are in POST_RECOVERY primary.runTranslogRecovery(primary.getEngine(), snapshot); assertThat(primary.recoveryState().getTranslog().totalOperationsOnStart(), equalTo(numTotalEntries)); assertThat(primary.recoveryState().getTranslog().totalOperations(), equalTo(numTotalEntries)); @@ -1690,61 +1735,6 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(primary); } - public void testTranslogOpToEngineOpConverter() throws IOException { - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .build(); - IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") - .settings(settings) - .primaryTerm(0, 1).build(); - IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); - TranslogOpToEngineOpConverter converter = new TranslogOpToEngineOpConverter(primary.shardId(), primary.mapperService()); - - Engine.Operation.Origin origin = randomFrom(Engine.Operation.Origin.values()); - // convert index op - Translog.Index translogIndexOp = new Translog.Index(randomAlphaOfLength(10), randomAlphaOfLength(10), randomNonNegativeLong(), - randomNonNegativeLong(), randomFrom(VersionType.values()), "{\"foo\" : \"bar\"}".getBytes(Charset.forName("UTF-8")), - randomAlphaOfLength(5), randomAlphaOfLength(5), randomLong()); - Engine.Index engineIndexOp = (Engine.Index) converter.convertToEngineOp(translogIndexOp, origin); - assertEquals(engineIndexOp.origin(), origin); - assertEquals(engineIndexOp.primaryTerm(), translogIndexOp.primaryTerm()); - assertEquals(engineIndexOp.seqNo(), translogIndexOp.seqNo()); - assertEquals(engineIndexOp.version(), translogIndexOp.version()); - assertEquals(engineIndexOp.versionType(), translogIndexOp.versionType().versionTypeForReplicationAndRecovery()); - assertEquals(engineIndexOp.id(), translogIndexOp.id()); - assertEquals(engineIndexOp.type(), translogIndexOp.type()); - assertEquals(engineIndexOp.getAutoGeneratedIdTimestamp(), translogIndexOp.getAutoGeneratedIdTimestamp()); - assertEquals(engineIndexOp.parent(), translogIndexOp.parent()); - assertEquals(engineIndexOp.routing(), translogIndexOp.routing()); - assertEquals(engineIndexOp.source(), translogIndexOp.source()); - - // convert delete op - Translog.Delete translogDeleteOp = new Translog.Delete(randomAlphaOfLength(5), randomAlphaOfLength(5), - new Term(randomAlphaOfLength(5), randomAlphaOfLength(5)), randomNonNegativeLong(), randomNonNegativeLong(), - randomNonNegativeLong(), randomFrom(VersionType.values())); - Engine.Delete engineDeleteOp = (Engine.Delete) converter.convertToEngineOp(translogDeleteOp, origin); - assertEquals(engineDeleteOp.origin(), origin); - assertEquals(engineDeleteOp.primaryTerm(), translogDeleteOp.primaryTerm()); - assertEquals(engineDeleteOp.seqNo(), translogDeleteOp.seqNo()); - assertEquals(engineDeleteOp.version(), translogDeleteOp.version()); - assertEquals(engineDeleteOp.versionType(), 
translogDeleteOp.versionType().versionTypeForReplicationAndRecovery()); - assertEquals(engineDeleteOp.id(), translogDeleteOp.id()); - assertEquals(engineDeleteOp.type(), translogDeleteOp.type()); - assertEquals(engineDeleteOp.uid(), translogDeleteOp.uid()); - - // convert noop - Translog.NoOp translogNoOp = new Translog.NoOp(randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(5)); - Engine.NoOp engineNoOp = (Engine.NoOp) converter.convertToEngineOp(translogNoOp, origin); - assertEquals(engineNoOp.origin(), origin); - assertEquals(engineNoOp.primaryTerm(), translogNoOp.primaryTerm()); - assertEquals(engineNoOp.seqNo(), translogNoOp.seqNo()); - assertEquals(engineNoOp.reason(), translogNoOp.reason()); - - closeShards(primary); - } - public void testShardActiveDuringInternalRecovery() throws IOException { IndexShard shard = newStartedShard(true); indexDoc(shard, "type", "0"); @@ -1853,7 +1843,7 @@ public class IndexShardTests extends IndexShardTestCase { assertEquals(file.recovered(), file.length()); } } - targetShard.updateRoutingEntry(ShardRoutingHelper.moveToStarted(targetShard.routingEntry())); + IndexShardTestCase.updateRoutingEntry(targetShard, ShardRoutingHelper.moveToStarted(targetShard.routingEntry())); assertDocCount(targetShard, 2); } // now check that it's persistent ie. that the added shards are committed @@ -1880,22 +1870,7 @@ public class IndexShardTests extends IndexShardTestCase { final long numDocsToDelete = randomIntBetween((int) Math.ceil(Math.nextUp(numDocs / 10.0)), Math.toIntExact(numDocs)); for (int i = 0; i < numDocs; i++) { final String id = Integer.toString(i); - final ParsedDocument doc = - testParsedDocument(id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null); - final Engine.Index index = - new Engine.Index( - new Term("_id", doc.id()), - doc, - SequenceNumbersService.UNASSIGNED_SEQ_NO, - 0, - Versions.MATCH_ANY, - VersionType.INTERNAL, - PRIMARY, - System.nanoTime(), - -1, - false); - final Engine.IndexResult result = indexShard.index(index); - assertThat(result.getVersion(), equalTo(1L)); + indexDoc(indexShard, "test", id); } indexShard.refresh("test"); @@ -1910,22 +1885,8 @@ public class IndexShardTests extends IndexShardTestCase { IntStream.range(0, Math.toIntExact(numDocs)).boxed().collect(Collectors.toList())); for (final Integer i : ids) { final String id = Integer.toString(i); - final ParsedDocument doc = - testParsedDocument(id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null); - final Engine.Index index = - new Engine.Index( - new Term("_id", doc.id()), - doc, - SequenceNumbersService.UNASSIGNED_SEQ_NO, - 0, - Versions.MATCH_ANY, - VersionType.INTERNAL, - PRIMARY, - System.nanoTime(), - -1, - false); - final Engine.IndexResult result = indexShard.index(index); - assertThat(result.getVersion(), equalTo(2L)); + deleteDoc(indexShard, "test", id); + indexDoc(indexShard, "test", id); } // flush the buffered deletes diff --git a/core/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/core/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java new file mode 100644 index 00000000000..c2c44421b84 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -0,0 +1,140 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.shard; + +import org.elasticsearch.action.resync.ResyncReplicationResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.io.stream.ByteBufferStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.tasks.TaskManager; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.core.IsInstanceOf.instanceOf; + +public class PrimaryReplicaSyncerTests extends IndexShardTestCase { + + public void testSyncerSendsOffCorrectDocuments() throws Exception { + IndexShard shard = newStartedShard(true); + TaskManager taskManager = new TaskManager(Settings.EMPTY); + AtomicBoolean syncActionCalled = new AtomicBoolean(); + PrimaryReplicaSyncer.SyncAction syncAction = + (request, parentTask, allocationId, listener) -> { + logger.info("Sending off {} operations", request.getOperations().size()); + syncActionCalled.set(true); + assertThat(parentTask, instanceOf(PrimaryReplicaSyncer.ResyncTask.class)); + listener.onResponse(new ResyncReplicationResponse()); + }; + PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer(Settings.EMPTY, taskManager, syncAction); + syncer.setChunkSize(new ByteSizeValue(randomIntBetween(1, 100))); + + int numDocs = randomInt(10); + for (int i = 0; i < numDocs; i++) { + indexDoc(shard, "test", Integer.toString(i)); + } + + long globalCheckPoint = numDocs > 0 ? randomIntBetween(0, numDocs - 1) : 0; + boolean syncNeeded = numDocs > 0 && globalCheckPoint < numDocs - 1; + + String allocationId = shard.routingEntry().allocationId().getId(); + shard.updateShardState(shard.routingEntry(), shard.getPrimaryTerm(), null, 1000L, Collections.singleton(allocationId), + Collections.emptySet()); + shard.updateLocalCheckpointForShard(allocationId, globalCheckPoint); + assertEquals(globalCheckPoint, shard.getGlobalCheckpoint()); + + logger.info("Total ops: {}, global checkpoint: {}", numDocs, globalCheckPoint); + + PlainActionFuture fut = new PlainActionFuture<>(); + syncer.resync(shard, fut); + fut.get(); + + if (syncNeeded) { + assertTrue("Sync action was not called", syncActionCalled.get()); + } + assertEquals(globalCheckPoint == numDocs - 1 ? 
0 : numDocs, fut.get().getTotalOperations()); + if (syncNeeded) { + long skippedOps = globalCheckPoint + 1; // everything up to global checkpoint included + assertEquals(skippedOps, fut.get().getSkippedOperations()); + assertEquals(numDocs - skippedOps, fut.get().getResyncedOperations()); + } else { + assertEquals(0, fut.get().getSkippedOperations()); + assertEquals(0, fut.get().getResyncedOperations()); + } + + closeShards(shard); + } + + public void testStatusSerialization() throws IOException { + PrimaryReplicaSyncer.ResyncTask.Status status = new PrimaryReplicaSyncer.ResyncTask.Status(randomAlphaOfLength(10), + randomIntBetween(0, 1000), randomIntBetween(0, 1000), randomIntBetween(0, 1000)); + final BytesStreamOutput out = new BytesStreamOutput(); + status.writeTo(out); + final ByteBufferStreamInput in = new ByteBufferStreamInput(ByteBuffer.wrap(out.bytes().toBytesRef().bytes)); + PrimaryReplicaSyncer.ResyncTask.Status serializedStatus = new PrimaryReplicaSyncer.ResyncTask.Status(in); + assertEquals(status, serializedStatus); + } + + public void testStatusEquals() throws IOException { + PrimaryReplicaSyncer.ResyncTask task = new PrimaryReplicaSyncer.ResyncTask(0, "type", "action", "desc", null); + task.setPhase(randomAlphaOfLength(10)); + task.setResyncedOperations(randomIntBetween(0, 1000)); + task.setTotalOperations(randomIntBetween(0, 1000)); + task.setSkippedOperations(randomIntBetween(0, 1000)); + PrimaryReplicaSyncer.ResyncTask.Status status = task.getStatus(); + PrimaryReplicaSyncer.ResyncTask.Status sameStatus = task.getStatus(); + assertNotSame(status, sameStatus); + assertEquals(status, sameStatus); + assertEquals(status.hashCode(), sameStatus.hashCode()); + + switch (randomInt(3)) { + case 0: task.setPhase("otherPhase"); break; + case 1: task.setResyncedOperations(task.getResyncedOperations() + 1); break; + case 2: task.setSkippedOperations(task.getSkippedOperations() + 1); break; + case 3: task.setTotalOperations(task.getTotalOperations() + 1); break; + } + + PrimaryReplicaSyncer.ResyncTask.Status differentStatus = task.getStatus(); + assertNotEquals(status, differentStatus); + } + + public void testStatusReportsCorrectNumbers() throws IOException { + PrimaryReplicaSyncer.ResyncTask task = new PrimaryReplicaSyncer.ResyncTask(0, "type", "action", "desc", null); + task.setPhase(randomAlphaOfLength(10)); + task.setResyncedOperations(randomIntBetween(0, 1000)); + task.setTotalOperations(randomIntBetween(0, 1000)); + task.setSkippedOperations(randomIntBetween(0, 1000)); + PrimaryReplicaSyncer.ResyncTask.Status status = task.getStatus(); + XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); + status.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); + String jsonString = jsonBuilder.string(); + assertThat(jsonString, containsString("\"phase\":\"" + task.getPhase() + "\"")); + assertThat(jsonString, containsString("\"totalOperations\":" + task.getTotalOperations())); + assertThat(jsonString, containsString("\"resyncedOperations\":" + task.getResyncedOperations())); + assertThat(jsonString, containsString("\"skippedOperations\":" + task.getSkippedOperations())); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java b/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java index dc7d620a97b..8d3ac8433d1 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java @@ -21,7 +21,6 @@ package 
org.elasticsearch.index.shard; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.document.Field; import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -32,11 +31,12 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.test.ESTestCase; @@ -46,8 +46,11 @@ import java.nio.file.Path; import java.nio.file.attribute.BasicFileAttributes; import java.security.AccessControlException; import java.util.Arrays; +import java.util.Map; import java.util.function.Predicate; +import static org.hamcrest.CoreMatchers.equalTo; + public class StoreRecoveryTests extends ESTestCase { public void testAddIndices() throws IOException { @@ -82,7 +85,9 @@ public class StoreRecoveryTests extends ESTestCase { StoreRecovery storeRecovery = new StoreRecovery(new ShardId("foo", "bar", 1), logger); RecoveryState.Index indexStats = new RecoveryState.Index(); Directory target = newFSDirectory(createTempDir()); - storeRecovery.addIndices(indexStats, target, indexSort, dirs); + final long maxSeqNo = randomNonNegativeLong(); + final long maxUnsafeAutoIdTimestamp = randomNonNegativeLong(); + storeRecovery.addIndices(indexStats, target, indexSort, dirs, maxSeqNo, maxUnsafeAutoIdTimestamp); int numFiles = 0; Predicate filesFilter = (f) -> f.startsWith("segments") == false && f.equals("write.lock") == false && f.startsWith("extra") == false; @@ -99,6 +104,10 @@ public class StoreRecoveryTests extends ESTestCase { } DirectoryReader reader = DirectoryReader.open(target); SegmentInfos segmentCommitInfos = SegmentInfos.readLatestCommit(target); + final Map userData = segmentCommitInfos.getUserData(); + assertThat(userData.get(SequenceNumbers.MAX_SEQ_NO), equalTo(Long.toString(maxSeqNo))); + assertThat(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY), equalTo(Long.toString(maxSeqNo))); + assertThat(userData.get(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID), equalTo(Long.toString(maxUnsafeAutoIdTimestamp))); for (SegmentCommitInfo info : segmentCommitInfos) { // check that we didn't merge assertEquals("all sources must be flush", info.info.getDiagnostics().get("source"), "flush"); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java new file mode 100644 index 00000000000..05e05e05572 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.translog; + +import org.apache.lucene.store.ByteArrayDataOutput; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; +import org.mockito.Mockito; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + + +public class TranslogDeletionPolicyTests extends ESTestCase { + + public static TranslogDeletionPolicy createTranslogDeletionPolicy() { + return new TranslogDeletionPolicy( + IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(), + IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getDefault(Settings.EMPTY).getMillis() + ); + } + + public static TranslogDeletionPolicy createTranslogDeletionPolicy(IndexSettings indexSettings) { + return new TranslogDeletionPolicy(indexSettings.getTranslogRetentionSize().getBytes(), + indexSettings.getTranslogRetentionAge().getMillis()); + } + + public void testNoRetention() throws IOException { + long now = System.currentTimeMillis(); + Tuple, TranslogWriter> readersAndWriter = createReadersAndWriter(now); + List allGens = new ArrayList<>(readersAndWriter.v1()); + allGens.add(readersAndWriter.v2()); + try { + TranslogDeletionPolicy deletionPolicy = new MockDeletionPolicy(now, 0, 0); + assertMinGenRequired(deletionPolicy, readersAndWriter, 1L); + final int committedReader = randomIntBetween(0, allGens.size() - 1); + final long committedGen = allGens.get(committedReader).generation; + deletionPolicy.setMinTranslogGenerationForRecovery(committedGen); + assertMinGenRequired(deletionPolicy, readersAndWriter, committedGen); + } finally { + IOUtils.close(readersAndWriter.v1()); + IOUtils.close(readersAndWriter.v2()); + } + } + + public void testBytesRetention() throws IOException { + long now = System.currentTimeMillis(); + Tuple, TranslogWriter> readersAndWriter = createReadersAndWriter(now); + List allGens = new ArrayList<>(readersAndWriter.v1()); + allGens.add(readersAndWriter.v2()); + try { + final int selectedReader = randomIntBetween(0, allGens.size() - 1); + final long selectedGeneration = allGens.get(selectedReader).generation; + long size = allGens.stream().skip(selectedReader).map(BaseTranslogReader::sizeInBytes).reduce(Long::sum).get(); + assertThat(TranslogDeletionPolicy.getMinTranslogGenBySize(readersAndWriter.v1(), readersAndWriter.v2(), size), + equalTo(selectedGeneration)); + assertThat(TranslogDeletionPolicy.getMinTranslogGenBySize(readersAndWriter.v1(), readersAndWriter.v2(), -1), + equalTo(Long.MIN_VALUE)); + } finally { + 
IOUtils.close(readersAndWriter.v1()); + IOUtils.close(readersAndWriter.v2()); + } + } + + public void testAgeRetention() throws IOException { + long now = System.currentTimeMillis(); + Tuple, TranslogWriter> readersAndWriter = createReadersAndWriter(now); + List allGens = new ArrayList<>(readersAndWriter.v1()); + allGens.add(readersAndWriter.v2()); + try { + final int selectedReader = randomIntBetween(0, allGens.size() - 1); + final long selectedGeneration = allGens.get(selectedReader).generation; + long maxAge = now - allGens.get(selectedReader).getLastModifiedTime(); + assertThat(TranslogDeletionPolicy.getMinTranslogGenByAge(readersAndWriter.v1(), readersAndWriter.v2(), maxAge, now), + equalTo(selectedGeneration)); + assertThat(TranslogDeletionPolicy.getMinTranslogGenByAge(readersAndWriter.v1(), readersAndWriter.v2(), -1, now), + equalTo(Long.MIN_VALUE)); + } finally { + IOUtils.close(readersAndWriter.v1()); + IOUtils.close(readersAndWriter.v2()); + } + } + + /** + * Tests that age trumps size but recovery trumps both. + */ + public void testRetentionHierarchy() throws IOException { + long now = System.currentTimeMillis(); + Tuple, TranslogWriter> readersAndWriter = createReadersAndWriter(now); + List allGens = new ArrayList<>(readersAndWriter.v1()); + allGens.add(readersAndWriter.v2()); + try { + TranslogDeletionPolicy deletionPolicy = new MockDeletionPolicy(now, Long.MAX_VALUE, Long.MAX_VALUE); + deletionPolicy.setMinTranslogGenerationForRecovery(Long.MAX_VALUE); + int selectedReader = randomIntBetween(0, allGens.size() - 1); + final long selectedGenerationByAge = allGens.get(selectedReader).generation; + long maxAge = now - allGens.get(selectedReader).getLastModifiedTime(); + selectedReader = randomIntBetween(0, allGens.size() - 1); + final long selectedGenerationBySize = allGens.get(selectedReader).generation; + long size = allGens.stream().skip(selectedReader).map(BaseTranslogReader::sizeInBytes).reduce(Long::sum).get(); + deletionPolicy.setRetentionAgeInMillis(maxAge); + deletionPolicy.setRetentionSizeInBytes(size); + assertMinGenRequired(deletionPolicy, readersAndWriter, Math.max(selectedGenerationByAge, selectedGenerationBySize)); + // make a new policy as committed gen can't go backwards (for now) + deletionPolicy = new MockDeletionPolicy(now, size, maxAge); + long committedGen = randomFrom(allGens).generation; + deletionPolicy.setMinTranslogGenerationForRecovery(committedGen); + assertMinGenRequired(deletionPolicy, readersAndWriter, + Math.min(committedGen, Math.max(selectedGenerationByAge, selectedGenerationBySize))); + long viewGen = randomFrom(allGens).generation; + deletionPolicy.acquireTranslogGenForView(viewGen); + assertMinGenRequired(deletionPolicy, readersAndWriter, + Math.min( + Math.min(committedGen, viewGen), + Math.max(selectedGenerationByAge, selectedGenerationBySize))); + // disable age + deletionPolicy.setRetentionAgeInMillis(-1); + assertMinGenRequired(deletionPolicy, readersAndWriter, Math.min(Math.min(committedGen, viewGen), selectedGenerationBySize)); + // disable size + deletionPolicy.setRetentionAgeInMillis(maxAge); + deletionPolicy.setRetentionSizeInBytes(-1); + assertMinGenRequired(deletionPolicy, readersAndWriter, Math.min(Math.min(committedGen, viewGen), selectedGenerationByAge)); + // disable both + deletionPolicy.setRetentionAgeInMillis(-1); + deletionPolicy.setRetentionSizeInBytes(-1); + assertMinGenRequired(deletionPolicy, readersAndWriter, Math.min(committedGen, viewGen)); + } finally { + IOUtils.close(readersAndWriter.v1()); + 
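[Editor's note] testRetentionHierarchy above pins down how the policy is expected to combine its inputs: the recovery point and any open view always win, while size and age retention keep a generation only if it is both young enough and within the byte budget. A hedged restatement of the arithmetic the assertions use (a hypothetical helper, not the production API):

    // Sketch only: mirrors the expected values asserted in testRetentionHierarchy.
    static long expectedMinGenRequired(long minGenForRecovery, long minGenHeldByView,
                                       long minGenByAge, long minGenBySize) {
        // retention keeps a generation only if BOTH the age and the size budget cover it
        long minGenByRetention = Math.max(minGenByAge, minGenBySize);
        // but nothing needed for recovery or held by an open view may be deleted
        return Math.min(Math.min(minGenForRecovery, minGenHeldByView), minGenByRetention);
    }
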
IOUtils.close(readersAndWriter.v2()); + } + + } + + private void assertMinGenRequired(TranslogDeletionPolicy deletionPolicy, Tuple, TranslogWriter> readersAndWriter, + long expectedGen) throws IOException { + assertThat(deletionPolicy.minTranslogGenRequired(readersAndWriter.v1(), readersAndWriter.v2()), equalTo(expectedGen)); + } + + private Tuple, TranslogWriter> createReadersAndWriter(final long now) throws IOException { + final Path tempDir = createTempDir(); + Files.createFile(tempDir.resolve(Translog.CHECKPOINT_FILE_NAME)); + TranslogWriter writer = null; + List readers = new ArrayList<>(); + final int numberOfReaders = randomIntBetween(0, 10); + for (long gen = 1; gen <= numberOfReaders + 1; gen++) { + if (writer != null) { + final TranslogReader reader = Mockito.spy(writer.closeIntoReader()); + Mockito.doReturn(writer.getLastModifiedTime()).when(reader).getLastModifiedTime(); + readers.add(reader); + } + writer = TranslogWriter.create(new ShardId("index", "uuid", 0), "translog_uuid", gen, + tempDir.resolve(Translog.getFilename(gen)), FileChannel::open, TranslogConfig.DEFAULT_BUFFER_SIZE, () -> 1L, 1L, () -> 1L + ); + writer = Mockito.spy(writer); + Mockito.doReturn(now - (numberOfReaders - gen + 1) * 1000).when(writer).getLastModifiedTime(); + + byte[] bytes = new byte[4]; + ByteArrayDataOutput out = new ByteArrayDataOutput(bytes); + + for (int ops = randomIntBetween(0, 20); ops > 0; ops--) { + out.reset(bytes); + out.writeInt(ops); + writer.add(new BytesArray(bytes), ops); + } + } + return new Tuple<>(readers, writer); + } + + private static class MockDeletionPolicy extends TranslogDeletionPolicy { + + long now; + + MockDeletionPolicy(long now, long retentionSizeInBytes, long maxRetentionAgeInMillis) { + super(retentionSizeInBytes, maxRetentionAgeInMillis); + this.now = now; + } + + @Override + protected long currentTime() { + return now; + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 4fe97919c38..dc78854f272 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -43,7 +43,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -63,6 +62,8 @@ import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.index.seqno.LocalCheckpointTracker; +import org.elasticsearch.index.seqno.LocalCheckpointTrackerTests; import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog.Location; @@ -106,6 +107,7 @@ import java.util.stream.LongStream; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween; import static org.elasticsearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; +import static org.elasticsearch.index.translog.TranslogDeletionPolicyTests.createTranslogDeletionPolicy; import static 
org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -141,7 +143,8 @@ public class TranslogTests extends ESTestCase { } protected Translog createTranslog(TranslogConfig config, String translogUUID) throws IOException { - return new Translog(config, translogUUID, new TranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()), + () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); } private void markCurrentGenAsCommitted(Translog translog) throws IOException { @@ -156,12 +159,10 @@ public class TranslogTests extends ESTestCase { private void commit(Translog translog, long genToCommit) throws IOException { final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); deletionPolicy.setMinTranslogGenerationForRecovery(genToCommit); + long minGenRequired = deletionPolicy.minTranslogGenRequired(translog.getReaders(), translog.getCurrent()); translog.trimUnreferencedReaders(); - if (deletionPolicy.pendingViewsCount() == 0) { - assertThat(deletionPolicy.minTranslogGenRequired(), equalTo(genToCommit)); - } - // we may have some views closed concurrently causing the deletion policy to increase it's minTranslogGenRequired - assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(deletionPolicy.minTranslogGenRequired())); + assertThat(minGenRequired, equalTo(translog.getMinFileGeneration())); + assertFilePresences(translog); } @Override @@ -186,14 +187,19 @@ public class TranslogTests extends ESTestCase { private Translog create(Path path) throws IOException { globalCheckpoint = new AtomicLong(SequenceNumbersService.UNASSIGNED_SEQ_NO); - return new Translog(getTranslogConfig(path), null, new TranslogDeletionPolicy(), () -> globalCheckpoint.get()); + final TranslogConfig translogConfig = getTranslogConfig(path); + final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); + return new Translog(translogConfig, null, deletionPolicy, () -> globalCheckpoint.get()); } private TranslogConfig getTranslogConfig(final Path path) { final Settings settings = Settings - .builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) - .build(); + .builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) + // only randomize between nog age retention and a long one, so failures will have a chance of reproducing + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomBoolean() ? 
"-1ms" : "1h") + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), randomIntBetween(-1, 2048) + "b") + .build(); return getTranslogConfig(path, settings); } @@ -318,7 +324,7 @@ public class TranslogTests extends ESTestCase { assertThat(snapshot.totalOperations(), equalTo(ops.size())); markCurrentGenAsCommitted(translog); - snapshot = translog.newSnapshot(); + snapshot = translog.newSnapshot(firstId + 1); assertThat(snapshot, SnapshotMatchers.size(0)); assertThat(snapshot.totalOperations(), equalTo(0)); } @@ -338,49 +344,60 @@ public class TranslogTests extends ESTestCase { } public void testStats() throws IOException { + // self control cleaning for test + translog.getDeletionPolicy().setRetentionSizeInBytes(1024 * 1024); + translog.getDeletionPolicy().setRetentionAgeInMillis(3600 * 1000); final long firstOperationPosition = translog.getFirstOperationPosition(); { final TranslogStats stats = stats(); - assertThat(stats.estimatedNumberOfOperations(), equalTo(0L)); + assertThat(stats.estimatedNumberOfOperations(), equalTo(0)); } assertThat((int) firstOperationPosition, greaterThan(CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC))); translog.add(new Translog.Index("test", "1", 0, new byte[]{1})); { final TranslogStats stats = stats(); - assertThat(stats.estimatedNumberOfOperations(), equalTo(1L)); + assertThat(stats.estimatedNumberOfOperations(), equalTo(1)); assertThat(stats.getTranslogSizeInBytes(), equalTo(97L)); + assertThat(stats.getUncommittedOperations(), equalTo(1)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(97L)); } translog.add(new Translog.Delete("test", "2", 1, newUid("2"))); { final TranslogStats stats = stats(); - assertThat(stats.estimatedNumberOfOperations(), equalTo(2L)); + assertThat(stats.estimatedNumberOfOperations(), equalTo(2)); assertThat(stats.getTranslogSizeInBytes(), equalTo(146L)); + assertThat(stats.getUncommittedOperations(), equalTo(2)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(146L)); } translog.add(new Translog.Delete("test", "3", 2, newUid("3"))); { final TranslogStats stats = stats(); - assertThat(stats.estimatedNumberOfOperations(), equalTo(3L)); + assertThat(stats.estimatedNumberOfOperations(), equalTo(3)); assertThat(stats.getTranslogSizeInBytes(), equalTo(195L)); + assertThat(stats.getUncommittedOperations(), equalTo(3)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(195L)); } translog.add(new Translog.NoOp(3, 1, randomAlphaOfLength(16))); { final TranslogStats stats = stats(); - assertThat(stats.estimatedNumberOfOperations(), equalTo(4L)); + assertThat(stats.estimatedNumberOfOperations(), equalTo(4)); assertThat(stats.getTranslogSizeInBytes(), equalTo(237L)); + assertThat(stats.getUncommittedOperations(), equalTo(4)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(237L)); } final long expectedSizeInBytes = 280L; translog.rollGeneration(); { final TranslogStats stats = stats(); - assertThat(stats.estimatedNumberOfOperations(), equalTo(4L)); - assertThat( - stats.getTranslogSizeInBytes(), - equalTo(expectedSizeInBytes)); + assertThat(stats.estimatedNumberOfOperations(), equalTo(4)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(expectedSizeInBytes)); + assertThat(stats.getUncommittedOperations(), equalTo(4)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(expectedSizeInBytes)); } { @@ -390,22 +407,25 @@ public class TranslogTests extends ESTestCase { final TranslogStats copy = new TranslogStats(); copy.readFrom(out.bytes().streamInput()); - 
assertThat(copy.estimatedNumberOfOperations(), equalTo(4L)); + assertThat(copy.estimatedNumberOfOperations(), equalTo(4)); assertThat(copy.getTranslogSizeInBytes(), equalTo(expectedSizeInBytes)); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.startObject(); copy.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - assertThat(builder.string(), equalTo("{\"translog\":{\"operations\":4,\"size_in_bytes\":" + expectedSizeInBytes + "}}")); + assertThat(builder.string(), equalTo("{\"translog\":{\"operations\":4,\"size_in_bytes\":" + expectedSizeInBytes + + ",\"uncommitted_operations\":4,\"uncommitted_size_in_bytes\":" + expectedSizeInBytes + "}}")); } } markCurrentGenAsCommitted(translog); { final TranslogStats stats = stats(); - assertThat(stats.estimatedNumberOfOperations(), equalTo(0L)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(firstOperationPosition)); + assertThat(stats.estimatedNumberOfOperations(), equalTo(4)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(expectedSizeInBytes)); + assertThat(stats.getUncommittedOperations(), equalTo(0)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(firstOperationPosition)); } } @@ -414,27 +434,38 @@ public class TranslogTests extends ESTestCase { final int n = randomIntBetween(0, 16); final List statsList = new ArrayList<>(n); for (int i = 0; i < n; i++) { - final TranslogStats stats = new TranslogStats(randomIntBetween(1, 4096), randomIntBetween(1, 1 << 20)); + final TranslogStats stats = new TranslogStats(randomIntBetween(1, 4096), randomIntBetween(1, 1 << 20), + randomIntBetween(1, 1 << 20), randomIntBetween(1, 4096)); statsList.add(stats); total.add(stats); } assertThat( total.estimatedNumberOfOperations(), - equalTo(statsList.stream().mapToLong(TranslogStats::estimatedNumberOfOperations).sum())); + equalTo(statsList.stream().mapToInt(TranslogStats::estimatedNumberOfOperations).sum())); assertThat( total.getTranslogSizeInBytes(), equalTo(statsList.stream().mapToLong(TranslogStats::getTranslogSizeInBytes).sum())); + assertThat( + total.getUncommittedOperations(), + equalTo(statsList.stream().mapToInt(TranslogStats::getUncommittedOperations).sum())); + assertThat( + total.getUncommittedSizeInBytes(), + equalTo(statsList.stream().mapToLong(TranslogStats::getUncommittedSizeInBytes).sum())); } public void testNegativeNumberOfOperations() { - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(-1, 1)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(-1, 1, 1, 1)); assertThat(e, hasToString(containsString("numberOfOperations must be >= 0"))); + e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, -1, 1)); + assertThat(e, hasToString(containsString("uncommittedOperations must be >= 0"))); } public void testNegativeSizeInBytes() { - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, -1)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, -1, 1, 1)); assertThat(e, hasToString(containsString("translogSizeInBytes must be >= 0"))); + e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, -1)); + assertThat(e, hasToString(containsString("uncommittedSizeInBytes must be >= 0"))); } public void testSnapshot() throws IOException { @@ -720,7 +751,9 @@ public class TranslogTests extends ESTestCase { final AtomicBoolean run = new 
AtomicBoolean(true); final Object flushMutex = new Object(); - + final AtomicLong lastCommittedLocalCheckpoint = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED); + final LocalCheckpointTracker tracker = LocalCheckpointTrackerTests.createEmptyTracker(); + final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); // any errors on threads final List errors = new CopyOnWriteArrayList<>(); logger.debug("using [{}] readers. [{}] writers. flushing every ~[{}] ops.", readers.length, writers.length, flushEveryOps); @@ -733,7 +766,7 @@ public class TranslogTests extends ESTestCase { barrier.await(); int counter = 0; while (run.get() && idGenerator.get() < maxOps) { - long id = idGenerator.incrementAndGet(); + long id = idGenerator.getAndIncrement(); final Translog.Operation op; final Translog.Operation.Type type = Translog.Operation.Type.values()[((int) (id % Translog.Operation.Type.values().length))]; @@ -752,6 +785,7 @@ public class TranslogTests extends ESTestCase { throw new AssertionError("unsupported operation type [" + type + "]"); } Translog.Location location = translog.add(op); + tracker.markSeqNoAsCompleted(id); Translog.Location existing = writtenOps.put(op, location); if (existing != null) { fail("duplicate op [" + op + "], old entry at " + location); @@ -763,7 +797,12 @@ public class TranslogTests extends ESTestCase { synchronized (flushMutex) { // we need not do this concurrently as we need to make sure that the generation // we're committing - is still present when we're committing - rollAndCommit(translog); + long localCheckpoint = tracker.getCheckpoint() + 1; + translog.rollGeneration(); + deletionPolicy.setMinTranslogGenerationForRecovery( + translog.getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration); + translog.trimUnreferencedReaders(); + lastCommittedLocalCheckpoint.set(localCheckpoint); } } if (id % 7 == 0) { @@ -789,7 +828,7 @@ public class TranslogTests extends ESTestCase { final String threadId = "reader_" + i; readers[i] = new Thread(new AbstractRunnable() { Translog.View view = null; - Set writtenOpsAtView; + long committedLocalCheckpointAtView; @Override public void onFailure(Exception e) { @@ -812,9 +851,10 @@ public class TranslogTests extends ESTestCase { void newView() throws IOException { closeView(); view = translog.newView(); - // captures the currently written ops so we know what to expect from the view - writtenOpsAtView = new HashSet<>(writtenOps.keySet()); - logger.debug("--> [{}] opened view from [{}]", threadId, view.minTranslogGeneration()); + // captures the last committed checkpoint, while holding the view, simulating + // recovery logic which captures a view and gets a lucene commit + committedLocalCheckpointAtView = lastCommittedLocalCheckpoint.get(); + logger.debug("--> [{}] opened view from [{}]", threadId, view.viewGenToRelease); } @Override @@ -829,23 +869,18 @@ public class TranslogTests extends ESTestCase { // captures al views that are written since the view was created (with a small caveat see bellow) // these are what we expect the snapshot to return (and potentially some more). 
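[Editor's note] The concurrent stress test above now drives its simulated flushes from a local checkpoint instead of raw generation numbers. A minimal sketch of that flush path, assuming only the Translog and TranslogDeletionPolicy calls that appear in the test:

    // Everything above the local checkpoint must stay replayable, so the policy
    // is anchored at the generation containing localCheckpoint + 1 before trimming.
    void simulateFlush(Translog translog, TranslogDeletionPolicy deletionPolicy,
                       long localCheckpoint) throws IOException {
        translog.rollGeneration();
        long minGenToKeep = translog.getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration;
        deletionPolicy.setMinTranslogGenerationForRecovery(minGenToKeep);
        translog.trimUnreferencedReaders();
    }
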
Set expectedOps = new HashSet<>(writtenOps.keySet()); - expectedOps.removeAll(writtenOpsAtView); - Translog.Snapshot snapshot = view.snapshot(); + expectedOps.removeIf(op -> op.seqNo() <= committedLocalCheckpointAtView); + Translog.Snapshot snapshot = view.snapshot(committedLocalCheckpointAtView + 1L); Translog.Operation op; while ((op = snapshot.next()) != null) { expectedOps.remove(op); } if (expectedOps.isEmpty() == false) { - StringBuilder missed = new StringBuilder("missed ").append(expectedOps.size()).append(" operations"); + StringBuilder missed = new StringBuilder("missed ").append(expectedOps.size()) + .append(" operations from [").append(committedLocalCheckpointAtView + 1L).append("]"); boolean failed = false; for (Translog.Operation expectedOp : expectedOps) { final Translog.Location loc = writtenOps.get(expectedOp); - if (loc.generation < view.minTranslogGeneration()) { - // writtenOps is only updated after the op was written to the translog. This mean - // that ops written to the translog before the view was taken (and will be missing from the view) - // may yet be available in writtenOpsAtView, meaning we will erroneously expect them - continue; - } failed = true; missed.append("\n --> [").append(expectedOp).append("] written at ").append(loc); } @@ -855,7 +890,7 @@ public class TranslogTests extends ESTestCase { } // slow down things a bit and spread out testing.. synchronized (signalReaderSomeDataWasIndexed) { - if (idGenerator.get() < maxOps){ + if (idGenerator.get() < maxOps) { signalReaderSomeDataWasIndexed.wait(); } } @@ -1104,7 +1139,12 @@ public class TranslogTests extends ESTestCase { } writer.sync(); final Checkpoint writerCheckpoint = writer.getCheckpoint(); - try (TranslogReader reader = writer.closeIntoReader()) { + TranslogReader reader = writer.closeIntoReader(); + try { + if (randomBoolean()) { + reader.close(); + reader = translog.openReader(reader.path(), writerCheckpoint); + } for (int i = 0; i < numOps; i++) { final ByteBuffer buffer = ByteBuffer.allocate(4); reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i); @@ -1114,6 +1154,8 @@ public class TranslogTests extends ESTestCase { } final Checkpoint readerCheckpoint = reader.getCheckpoint(); assertThat(readerCheckpoint, equalTo(writerCheckpoint)); + } finally { + IOUtils.close(reader); } } } @@ -1148,7 +1190,7 @@ public class TranslogTests extends ESTestCase { translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); - Translog.Snapshot snapshot = translog.newSnapshot(); + Translog.Snapshot snapshot = translog.newSnapshot(translogGeneration.translogFileGeneration); for (int i = minUncommittedOp; i < translogOperations; i++) { assertEquals("expected operation" + i + " to be in the previous translog but wasn't", translog.currentFileGeneration() - 1, locations.get(i).generation); Translog.Operation next = snapshot.next(); @@ -1376,13 +1418,13 @@ public class TranslogTests extends ESTestCase { final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID.length()); try { - new Translog(config, foreignTranslog, new TranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); + new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> 
SequenceNumbersService.UNASSIGNED_SEQ_NO); fail("translog doesn't belong to this UUID"); } catch (TranslogCorruptedException ex) { } this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); - Translog.Snapshot snapshot = this.translog.newSnapshot(); + Translog.Snapshot snapshot = this.translog.newSnapshot(translogGeneration.translogFileGeneration); for (int i = firstUncommitted; i < translogOperations; i++) { Translog.Operation next = snapshot.next(); assertNotNull("" + i, next); @@ -1602,7 +1644,7 @@ public class TranslogTests extends ESTestCase { Path tempDir = createTempDir(); final FailSwitch fail = new FailSwitch(); TranslogConfig config = getTranslogConfig(tempDir); - Translog translog = getFailableTranslog(fail, config, false, true, null, new TranslogDeletionPolicy()); + Translog translog = getFailableTranslog(fail, config, false, true, null, createTranslogDeletionPolicy()); LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer boarders regularly translog.add(new Translog.Index("test", "1", 0, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); fail.failAlways(); @@ -1697,7 +1739,7 @@ public class TranslogTests extends ESTestCase { iterator.remove(); } } - try (Translog tlog = new Translog(config, translogUUID, new TranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { + try (Translog tlog = new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { Translog.Snapshot snapshot = tlog.newSnapshot(); if (writtenOperations.size() != snapshot.totalOperations()) { for (int i = 0; i < threadCount; i++) { @@ -1740,7 +1782,7 @@ public class TranslogTests extends ESTestCase { // engine blows up, after committing the above generation translog.close(); TranslogConfig config = translog.getConfig(); - final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(); + final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); assertThat(translog.getMinFileGeneration(), equalTo(1L)); @@ -1766,6 +1808,10 @@ public class TranslogTests extends ESTestCase { final long comittedGeneration; final String translogUUID; try (Translog translog = getFailableTranslog(fail, config)) { + final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); + // disable retention so we trim things + deletionPolicy.setRetentionSizeInBytes(-1); + deletionPolicy.setRetentionAgeInMillis(-1); translogUUID = translog.getTranslogUUID(); int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations / 2; op++) { @@ -1782,14 +1828,15 @@ public class TranslogTests extends ESTestCase { translog.rollGeneration(); } } + deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); fail.failRandomly(); try { - commit(translog, comittedGeneration); + translog.trimUnreferencedReaders(); } catch (Exception e) { // expected... 
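[Editor's note] The recovery step that follows re-opens the translog with retention disabled and the recovery point pinned to the last committed generation. A sketch of that pattern, using only calls that appear in the diff (committedGeneration and translogUUID are placeholders taken from the test):

    // Re-open after a simulated crash: -1/-1 disables size and age based retention,
    // so only generations needed for recovery are kept around.
    TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
    deletionPolicy.setMinTranslogGenerationForRecovery(committedGeneration);
    try (Translog recovered = new Translog(config, translogUUID, deletionPolicy,
            () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) {
        // generations below committedGeneration may already be gone; the test only
        // verifies that whatever remains is consistent
    }
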
} } - final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(); + final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { // we don't know when things broke exactly @@ -1803,7 +1850,7 @@ public class TranslogTests extends ESTestCase { } private Translog getFailableTranslog(FailSwitch fail, final TranslogConfig config) throws IOException { - return getFailableTranslog(fail, config, randomBoolean(), false, null, new TranslogDeletionPolicy()); + return getFailableTranslog(fail, config, randomBoolean(), false, null, createTranslogDeletionPolicy()); } private static class FailSwitch { @@ -1965,7 +2012,7 @@ public class TranslogTests extends ESTestCase { translog.add(new Translog.Index("test", "boom", 0, "boom".getBytes(Charset.forName("UTF-8")))); translog.close(); try { - new Translog(config, translog.getTranslogUUID(), new TranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO) { + new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO) { @Override protected TranslogWriter createWriter(long fileGeneration) throws IOException { throw new MockDirectoryWrapper.FakeIOException(); @@ -2083,7 +2130,7 @@ public class TranslogTests extends ESTestCase { String generationUUID = null; try { boolean committing = false; - final Translog failableTLog = getFailableTranslog(fail, config, randomBoolean(), false, generationUUID, new TranslogDeletionPolicy()); + final Translog failableTLog = getFailableTranslog(fail, config, randomBoolean(), false, generationUUID, createTranslogDeletionPolicy()); try { LineFileDocs lineFileDocs = new LineFileDocs(random()); //writes pretty big docs so we cross buffer boarders regularly for (int opsAdded = 0; opsAdded < numOps; opsAdded++) { @@ -2142,7 +2189,7 @@ public class TranslogTests extends ESTestCase { // now randomly open this failing tlog again just to make sure we can also recover from failing during recovery if (randomBoolean()) { try { - TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(); + TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(); deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery); IOUtils.close(getFailableTranslog(fail, config, randomBoolean(), false, generationUUID, deletionPolicy)); } catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) { @@ -2153,10 +2200,10 @@ public class TranslogTests extends ESTestCase { } fail.failNever(); // we don't wanna fail here but we might since we write a new checkpoint and create a new tlog file - TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(); + TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(); deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery); try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) { - Translog.Snapshot snapshot = translog.newSnapshot(); + Translog.Snapshot snapshot = translog.newSnapshot(minGenForRecovery); assertEquals(syncedDocs.size(), snapshot.totalOperations()); for (int i = 0; i < syncedDocs.size(); i++) { Translog.Operation next = snapshot.next(); @@ -2218,7 +2265,7 @@ public class TranslogTests extends ESTestCase { translog.rollGeneration(); TranslogConfig config 
= translog.getConfig(); final String translogUUID = translog.getTranslogUUID(); - final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(); + final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings()); translog.close(); translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO); translog.add(new Translog.Index("test", "2", 1, new byte[]{2})); @@ -2293,7 +2340,17 @@ public class TranslogTests extends ESTestCase { assertEquals("my_id", serializedDelete.id()); } - public void testRollGeneration() throws IOException { + public void testRollGeneration() throws Exception { + // make sure we keep some files around + final boolean longRetention = randomBoolean(); + final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); + if (longRetention) { + deletionPolicy.setRetentionAgeInMillis(3600 * 1000); + } else { + deletionPolicy.setRetentionAgeInMillis(-1); + } + // we control retention via time, disable size based calculations for simplicity + deletionPolicy.setRetentionSizeInBytes(-1); final long generation = translog.currentFileGeneration(); final int rolls = randomIntBetween(1, 16); int totalOperations = 0; @@ -2315,70 +2372,25 @@ public class TranslogTests extends ESTestCase { } commit(translog, generation + rolls); assertThat(translog.currentFileGeneration(), equalTo(generation + rolls )); - assertThat(translog.totalOperations(), equalTo(0)); - for (int i = 0; i < rolls; i++) { - assertFileDeleted(translog, generation + i); - } - assertFileIsPresent(translog, generation + rolls); - } - - public void testRollGenerationBetweenPrepareCommitAndCommit() throws IOException { - final long generation = translog.currentFileGeneration(); - int seqNo = 0; - - final int rollsBefore = randomIntBetween(0, 16); - for (int r = 1; r <= rollsBefore; r++) { - final int operationsBefore = randomIntBetween(1, 256); - for (int i = 0; i < operationsBefore; i++) { - translog.add(new Translog.NoOp(seqNo++, 0, "test")); - } - - try (Releasable ignored = translog.writeLock.acquire()) { - translog.rollGeneration(); - } - - assertThat(translog.currentFileGeneration(), equalTo(generation + r)); - for (int i = 0; i <= r; i++) { - assertFileIsPresent(translog, generation + r); - } - } - - assertThat(translog.currentFileGeneration(), equalTo(generation + rollsBefore)); - translog.rollGeneration(); - assertThat(translog.currentFileGeneration(), equalTo(generation + rollsBefore + 1)); - - for (int i = 0; i <= rollsBefore + 1; i++) { - assertFileIsPresent(translog, generation + i); - } - - final int rollsBetween = randomIntBetween(0, 16); - for (int r = 1; r <= rollsBetween; r++) { - final int operationsBetween = randomIntBetween(1, 256); - for (int i = 0; i < operationsBetween; i++) { - translog.add(new Translog.NoOp(seqNo++, 0, "test")); - } - - try (Releasable ignored = translog.writeLock.acquire()) { - translog.rollGeneration(); - } - - assertThat( - translog.currentFileGeneration(), - equalTo(generation + rollsBefore + 1 + r)); - for (int i = 0; i <= rollsBefore + 1 + r; i++) { + assertThat(translog.uncommittedOperations(), equalTo(0)); + if (longRetention) { + for (int i = 0; i <= rolls; i++) { assertFileIsPresent(translog, generation + i); } + deletionPolicy.setRetentionAgeInMillis(randomBoolean() ? 
100 : -1); + assertBusy(() -> { + translog.trimUnreferencedReaders(); + for (int i = 0; i < rolls; i++) { + assertFileDeleted(translog, generation + i); + } + }); + } else { + // immediate cleanup + for (int i = 0; i < rolls; i++) { + assertFileDeleted(translog, generation + i); + } } - - commit(translog, generation + rollsBefore + 1); - - for (int i = 0; i <= rollsBefore; i++) { - assertFileDeleted(translog, generation + i); - } - for (int i = rollsBefore + 1; i <= rollsBefore + 1 + rollsBetween; i++) { - assertFileIsPresent(translog, generation + i); - } - + assertFileIsPresent(translog, generation + rolls); } public void testMinGenerationForSeqNo() throws IOException { @@ -2444,65 +2456,26 @@ public class TranslogTests extends ESTestCase { final long generation = randomIntBetween(1, Math.toIntExact(translog.currentFileGeneration())); commit(translog, generation); - for (long g = 0; g < generation; g++) { - assertFileDeleted(translog, g); - } - for (long g = generation; g <= translog.currentFileGeneration(); g++) { - assertFileIsPresent(translog, g); - } } - public void testPrepareCommitAndCommit() throws IOException { + public void testOpenViewIsPassToDeletionPolicy() throws IOException { final int operations = randomIntBetween(1, 4096); - long seqNo = 0; - long last = -1; + final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); for (int i = 0; i < operations; i++) { - translog.add(new Translog.NoOp(seqNo++, 0, "test")); + translog.add(new Translog.NoOp(i, 0, "test")); if (rarely()) { - final long generation = translog.currentFileGeneration(); translog.rollGeneration(); - if (rarely()) { - // simulate generation filling up and rolling between preparing the commit and committing - translog.rollGeneration(); - } - final int committedGeneration = randomIntBetween(Math.max(1, Math.toIntExact(last)), Math.toIntExact(generation)); - commit(translog, committedGeneration); - last = committedGeneration; - for (long g = 0; g < committedGeneration; g++) { - assertFileDeleted(translog, g); - } - for (long g = committedGeneration; g <= translog.currentFileGeneration(); g++) { - assertFileIsPresent(translog, g); - } } - } - } - - public void testCommitWithOpenView() throws IOException { - final int operations = randomIntBetween(1, 4096); - long seqNo = 0; - long lastCommittedGeneration = -1; - for (int i = 0; i < operations; i++) { - translog.add(new Translog.NoOp(seqNo++, 0, "test")); if (rarely()) { - try (Translog.View ignored = translog.newView()) { - final long viewGeneration = lastCommittedGeneration; - translog.rollGeneration(); - final long committedGeneration = randomIntBetween( - Math.max(1, Math.toIntExact(lastCommittedGeneration)), - Math.toIntExact(translog.currentFileGeneration())); - commit(translog, committedGeneration); - lastCommittedGeneration = committedGeneration; - // with an open view, committing should preserve generations back to the last committed generation - for (long g = 1; g < Math.min(lastCommittedGeneration, viewGeneration); g++) { - assertFileDeleted(translog, g); - } - // the view generation could be -1 if no commit has been performed - final long max = Math.max(1, Math.min(lastCommittedGeneration, viewGeneration)); - for (long g = max; g <= translog.currentFileGeneration(); g++) { - assertFileIsPresent(translog, g); - } + commit(translog, randomLongBetween(deletionPolicy.getMinTranslogGenerationForRecovery(), translog.currentFileGeneration())); + } + if (frequently()) { + long viewGen; + try (Translog.View view = translog.newView()) { + viewGen 
= view.viewGenToRelease; + assertThat(deletionPolicy.getViewCount(view.viewGenToRelease), equalTo(1L)); } + assertThat(deletionPolicy.getViewCount(viewGen), equalTo(0L)); } } } diff --git a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index fb524f27591..d8367b0d6a6 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardIT; +import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -405,14 +406,11 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { imc.forceCheck(); // We must assertBusy because the writeIndexingBufferAsync is done in background (REFRESH) thread pool: - assertBusy(new Runnable() { - @Override - public void run() { - try (Engine.Searcher s2 = shard.acquireSearcher("index")) { - // 100 buffered deletes will easily exceed our 1 KB indexing buffer so it should trigger a write: - final long indexingBufferBytes2 = shard.getIndexBufferRAMBytesUsed(); - assertTrue(indexingBufferBytes2 < indexingBufferBytes1); - } + assertBusy(() -> { + try (Engine.Searcher s2 = shard.acquireSearcher("index")) { + // 100 buffered deletes will easily exceed our 1 KB indexing buffer so it should trigger a write: + final long indexingBufferBytes2 = shard.getIndexBufferRAMBytesUsed(); + assertTrue(indexingBufferBytes2 < indexingBufferBytes1); } }); } @@ -453,7 +451,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { assertEquals(1, imc.availableShards().size()); assertTrue(newShard.recoverFromStore()); assertTrue("we should have flushed in IMC at least once but did: " + flushes.get(), flushes.get() >= 1); - newShard.updateRoutingEntry(routing.moveToStarted()); + IndexShardTestCase.updateRoutingEntry(newShard, routing.moveToStarted()); } finally { newShard.close("simon says", false); } diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 1fe4def9623..0dc760d63bf 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.elasticsearch.indices.recovery.RecoveryState; @@ -130,14 +131,14 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas .updateUnassigned(unassignedInfo, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE); newRouting = ShardRoutingHelper.initialize(newRouting, nodeId); IndexShard shard = 
index.createShard(newRouting); - shard.updateRoutingEntry(newRouting); + IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(5, counter.get()); final DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); shard.markAsRecovering("store", new RecoveryState(newRouting, localNode, null)); shard.recoverFromStore(); newRouting = ShardRoutingHelper.moveToStarted(newRouting); - shard.updateRoutingEntry(newRouting); + IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(6, counter.get()); } finally { indicesService.removeIndex(idx, DELETED, "simon says"); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index b776b13dae9..76d5be5ea1e 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.indices; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; @@ -48,6 +49,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import java.util.Arrays; import java.util.Collection; @@ -68,7 +70,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(TestPlugin.class); + return Arrays.asList(TestPlugin.class, InternalSettingsPlugin.class); } public void testSpecifiedIndexUnavailableMultipleIndices() throws Exception { @@ -564,7 +566,40 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase { verify(client().admin().indices().preparePutMapping("_all").setType("type1").setSource("field", "type=text"), true); for (String index : Arrays.asList("foo", "foobar", "bar", "barbaz")) { - assertAcked(prepareCreate(index).setSettings("index.mapping.single_type", false)); + assertAcked(prepareCreate(index)); + } + + verify(client().admin().indices().preparePutMapping("foo").setType("type").setSource("field", "type=text"), false); + assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type"), notNullValue()); + verify(client().admin().indices().preparePutMapping("b*").setType("type").setSource("field", "type=text"), false); + assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type"), notNullValue()); + assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type"), notNullValue()); + verify(client().admin().indices().preparePutMapping("_all").setType("type").setSource("field", "type=text"), false); + assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type"), notNullValue()); + assertThat(client().admin().indices().prepareGetMappings("foobar").get().mappings().get("foobar").get("type"), notNullValue()); + assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type"), notNullValue()); + 
assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type"), notNullValue()); + verify(client().admin().indices().preparePutMapping().setType("type").setSource("field", "type=text"), false); + assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type"), notNullValue()); + assertThat(client().admin().indices().prepareGetMappings("foobar").get().mappings().get("foobar").get("type"), notNullValue()); + assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type"), notNullValue()); + assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type"), notNullValue()); + + + verify(client().admin().indices().preparePutMapping("c*").setType("type").setSource("field", "type=text"), true); + + assertAcked(client().admin().indices().prepareClose("barbaz").get()); + verify(client().admin().indices().preparePutMapping("barbaz").setType("type").setSource("field", "type=text"), false); + assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type"), notNullValue()); + } + + public void testPutMappingMultiType() throws Exception { + assertTrue("remove this multi type test", Version.CURRENT.before(Version.fromString("7.0.0"))); + verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=text"), true); + verify(client().admin().indices().preparePutMapping("_all").setType("type1").setSource("field", "type=text"), true); + + for (String index : Arrays.asList("foo", "foobar", "bar", "barbaz")) { + assertAcked(prepareCreate(index).setSettings("index.version.created", Version.V_5_6_0.id)); // allows for multiple types } verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=text"), false); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index e87dc24c8f8..ec21d94cf30 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -18,7 +18,10 @@ */ package org.elasticsearch.indices; +import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; +import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexGraveyard; @@ -41,6 +44,9 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.IllegalIndexShardStateException; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.similarity.BM25SimilarityProvider; @@ -55,6 +61,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -66,6 +73,8 @@ import static 
org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class IndicesServiceTests extends ESSingleNodeTestCase { @@ -369,4 +378,57 @@ public class IndicesServiceTests extends ESSingleNodeTestCase { assertThat(mapperService.documentMapperParser().parserContext("type").getSimilarity("test"), instanceOf(BM25SimilarityProvider.class)); } + + public void testStatsByShardDoesNotDieFromExpectedExceptions() { + final int shardCount = randomIntBetween(2, 5); + final int failedShardId = randomIntBetween(0, shardCount - 1); + + final Index index = new Index("test-index", "abc123"); + // the shard that is going to fail + final ShardId shardId = new ShardId(index, failedShardId); + + final List shards = new ArrayList<>(shardCount); + final List shardStats = new ArrayList<>(shardCount - 1); + + final IndexShardState state = randomFrom(IndexShardState.values()); + final String message = "TEST - expected"; + + final RuntimeException expectedException = + randomFrom(new IllegalIndexShardStateException(shardId, state, message), new AlreadyClosedException(message)); + + // this allows us to control the indices that exist + final IndicesService mockIndicesService = mock(IndicesService.class); + final IndexService indexService = mock(IndexService.class); + + // generate fake shards and their responses + for (int i = 0; i < shardCount; ++i) { + final IndexShard shard = mock(IndexShard.class); + + shards.add(shard); + + if (failedShardId != i) { + final IndexShardStats successfulShardStats = mock(IndexShardStats.class); + + shardStats.add(successfulShardStats); + + when(mockIndicesService.indexShardStats(mockIndicesService, shard, CommonStatsFlags.ALL)).thenReturn(successfulShardStats); + } else { + when(mockIndicesService.indexShardStats(mockIndicesService, shard, CommonStatsFlags.ALL)).thenThrow(expectedException); + } + } + + when(mockIndicesService.iterator()).thenReturn(Collections.singleton(indexService).iterator()); + when(indexService.iterator()).thenReturn(shards.iterator()); + when(indexService.index()).thenReturn(index); + + // real one, which has a logger defined + final IndicesService indicesService = getIndicesService(); + + final Map> indexStats = indicesService.statsByShard(mockIndicesService, CommonStatsFlags.ALL); + + assertThat(indexStats.isEmpty(), equalTo(false)); + assertThat("index not defined", indexStats.containsKey(index), equalTo(true)); + assertThat("unexpected shard stats", indexStats.get(index), equalTo(shardStats)); + } + } diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index b3394d4f4fa..a740f96cdd8 100644 --- a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -47,7 +47,7 @@ import org.elasticsearch.index.analysis.PreConfiguredTokenizer; import org.elasticsearch.index.analysis.StandardTokenizerFactory; import org.elasticsearch.index.analysis.StopTokenFilterFactory; import org.elasticsearch.index.analysis.TokenFilterFactory; -import org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory; +import org.elasticsearch.index.analysis.MyFilterTokenFilterFactory; import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider; 
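[Editor's note] Looking back at IndicesServiceTests#testStatsByShardDoesNotDieFromExpectedExceptions above: it mocks one shard to throw and expects stats for the remaining shards to survive. The per-shard guard it implies looks roughly like the following (a sketch under that assumption, not the actual IndicesService code):

    for (IndexShard indexShard : indexService) {
        try {
            IndexShardStats shardStats =
                indicesService.indexShardStats(indicesService, indexShard, CommonStatsFlags.ALL);
            if (shardStats != null) {
                statsByShard.computeIfAbsent(indexService.index(), k -> new ArrayList<>()).add(shardStats);
            }
        } catch (IllegalIndexShardStateException | AlreadyClosedException e) {
            // the shard closed or changed state mid-collection; skip it rather than
            // failing stats for the entire node
        }
    }
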
import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.test.ESTestCase; @@ -196,18 +196,6 @@ public class AnalysisModuleTests extends ESTestCase { // assertThat(czechstemmeranalyzer.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class)); // assertThat(czechstemmeranalyzer.tokenFilters().length, equalTo(4)); // assertThat(czechstemmeranalyzer.tokenFilters()[3], instanceOf(CzechStemTokenFilterFactory.class)); -// -// // check dictionary decompounder -// analyzer = analysisService.analyzer("decompoundingAnalyzer").analyzer(); -// assertThat(analyzer, instanceOf(CustomAnalyzer.class)); -// CustomAnalyzer dictionaryDecompounderAnalyze = (CustomAnalyzer) analyzer; -// assertThat(dictionaryDecompounderAnalyze.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class)); -// assertThat(dictionaryDecompounderAnalyze.tokenFilters().length, equalTo(1)); -// assertThat(dictionaryDecompounderAnalyze.tokenFilters()[0], instanceOf(DictionaryCompoundWordTokenFilterFactory.class)); - - Set wordList = Analysis.getWordSet(null, Version.CURRENT, settings, "index.analysis.filter.dict_dec.word_list"); - MatcherAssert.assertThat(wordList.size(), equalTo(6)); -// MatcherAssert.assertThat(wordList, hasItems("donau", "dampf", "schiff", "spargel", "creme", "suppe")); } public void testWordListPath() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java b/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java index ad51a5d6942..6e0c61c1544 100644 --- a/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java +++ b/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java @@ -26,7 +26,9 @@ import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.core.IsNull; import java.io.IOException; +import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -91,16 +93,16 @@ public class AnalyzeActionIT extends ESIntegTestCase { assertThat(analyzeResponse.getTokens().size(), equalTo(1)); assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("this is a test")); - analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setTokenizer("standard").addTokenFilter("lowercase").addTokenFilter("reverse").get(); + analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setTokenizer("standard").addTokenFilter("lowercase").get(); assertThat(analyzeResponse.getTokens().size(), equalTo(4)); AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(0); - assertThat(token.getTerm(), equalTo("siht")); + assertThat(token.getTerm(), equalTo("this")); token = analyzeResponse.getTokens().get(1); - assertThat(token.getTerm(), equalTo("si")); + assertThat(token.getTerm(), equalTo("is")); token = analyzeResponse.getTokens().get(2); assertThat(token.getTerm(), equalTo("a")); token = analyzeResponse.getTokens().get(3); - assertThat(token.getTerm(), equalTo("tset")); + assertThat(token.getTerm(), equalTo("test")); analyzeResponse = client().admin().indices().prepareAnalyze("of course").setTokenizer("standard").addTokenFilter("stop").get(); assertThat(analyzeResponse.getTokens().size(), equalTo(1)); @@ -280,14 +282,10 @@ public class AnalyzeActionIT extends ESIntegTestCase { assertThat(analyzeResponse.detail().tokenfilters()[0].getTokens()[2].getTerm(), equalTo("troubled")); String[] expectedAttributesKey = { "bytes", + 
"termFrequency", "positionLength"}; - assertThat(analyzeResponse.detail().tokenfilters()[0].getTokens()[2].getAttributes().size(), equalTo(expectedAttributesKey.length)); - Object extendedAttribute; - - for (String key : expectedAttributesKey) { - extendedAttribute = analyzeResponse.detail().tokenfilters()[0].getTokens()[2].getAttributes().get(key); - assertThat(extendedAttribute, notNullValue()); - } + assertThat(analyzeResponse.detail().tokenfilters()[0].getTokens()[2].getAttributes().keySet(), + equalTo(new HashSet<>(Arrays.asList(expectedAttributesKey)))); } public void testDetailAnalyzeWithMultiValues() throws Exception { @@ -385,7 +383,7 @@ public class AnalyzeActionIT extends ESIntegTestCase { assertThat(analyzeResponse.detail().tokenfilters()[0].getTokens()[2].getPositionLength(), equalTo(1)); // tokenfilter({"type": "stop", "stopwords": ["foo", "buzz"]}) - assertThat(analyzeResponse.detail().tokenfilters()[1].getName(), equalTo("_anonymous_tokenfilter_[1]")); + assertThat(analyzeResponse.detail().tokenfilters()[1].getName(), equalTo("_anonymous_tokenfilter")); assertThat(analyzeResponse.detail().tokenfilters()[1].getTokens().length, equalTo(1)); assertThat(analyzeResponse.detail().tokenfilters()[1].getTokens()[0].getTerm(), equalTo("test")); diff --git a/core/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java b/core/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java index ba4467a5630..96e885b07ac 100644 --- a/core/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.indices.analysis.HunspellService; import org.elasticsearch.test.ESTestCase; +import java.nio.file.Path; + import static java.util.Collections.emptyMap; import static org.elasticsearch.indices.analysis.HunspellService.HUNSPELL_IGNORE_CASE; import static org.elasticsearch.indices.analysis.HunspellService.HUNSPELL_LAZY_LOAD; @@ -34,20 +36,19 @@ import static org.hamcrest.Matchers.notNullValue; public class HunspellServiceTests extends ESTestCase { public void testLocaleDirectoryWithNodeLevelConfig() throws Exception { Settings settings = Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/conf_dir")) .put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean()) .put(HUNSPELL_IGNORE_CASE.getKey(), true) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .build(); - Dictionary dictionary = new HunspellService(settings, new Environment(settings), emptyMap()).getDictionary("en_US"); + final Environment environment = new Environment(settings, getDataPath("/indices/analyze/conf_dir")); + Dictionary dictionary = new HunspellService(settings, environment, emptyMap()).getDictionary("en_US"); assertThat(dictionary, notNullValue()); assertTrue(dictionary.getIgnoreCase()); } public void testLocaleDirectoryWithLocaleSpecificConfig() throws Exception { Settings settings = Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/conf_dir")) .put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean()) .put(HUNSPELL_IGNORE_CASE.getKey(), true) .put("indices.analysis.hunspell.dictionary.en_US.strict_affix_parsing", false) @@ -55,38 +56,44 @@ public class HunspellServiceTests extends ESTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .build(); - Dictionary dictionary = new HunspellService(settings, new 
Environment(settings), emptyMap()).getDictionary("en_US"); + final Path configPath = getDataPath("/indices/analyze/conf_dir"); + final Environment environment = new Environment(settings, configPath); + Dictionary dictionary = new HunspellService(settings, environment, emptyMap()).getDictionary("en_US"); assertThat(dictionary, notNullValue()); assertFalse(dictionary.getIgnoreCase()); // testing that dictionary specific settings override node level settings - dictionary = new HunspellService(settings, new Environment(settings), emptyMap()).getDictionary("en_US_custom"); + dictionary = new HunspellService(settings, new Environment(settings, configPath), emptyMap()).getDictionary("en_US_custom"); assertThat(dictionary, notNullValue()); assertTrue(dictionary.getIgnoreCase()); } public void testDicWithNoAff() throws Exception { Settings settings = Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/no_aff_conf_dir")) .put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .build(); IllegalStateException e = expectThrows(IllegalStateException.class, - () -> new HunspellService(settings, new Environment(settings), emptyMap()).getDictionary("en_US")); + () -> { + final Environment environment = new Environment(settings, getDataPath("/indices/analyze/no_aff_conf_dir")); + new HunspellService(settings, environment, emptyMap()).getDictionary("en_US"); + }); assertEquals("failed to load hunspell dictionary for locale: en_US", e.getMessage()); assertThat(e.getCause(), hasToString(containsString("Missing affix file"))); } public void testDicWithTwoAffs() throws Exception { Settings settings = Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), getDataPath("/indices/analyze/two_aff_conf_dir")) .put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .build(); IllegalStateException e = expectThrows(IllegalStateException.class, - () -> new HunspellService(settings, new Environment(settings), emptyMap()).getDictionary("en_US")); + () -> { + final Environment environment = new Environment(settings, getDataPath("/indices/analyze/two_aff_conf_dir")); + new HunspellService(settings, environment, emptyMap()).getDictionary("en_US"); + }); assertEquals("failed to load hunspell dictionary for locale: en_US", e.getMessage()); assertThat(e.getCause(), hasToString(containsString("Too many affix files"))); } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 4a3b9416396..419b7a430da 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -19,11 +19,13 @@ package org.elasticsearch.indices.cluster; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -33,6 
+35,8 @@ import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.shard.IndexEventListener;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.PrimaryReplicaSyncer;
+import org.elasticsearch.index.shard.PrimaryReplicaSyncer.ResyncTask;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndex;
@@ -319,6 +323,7 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
     * Mock for {@link IndexShard}
     */
    protected class MockIndexShard implements IndicesClusterStateService.Shard {
+       private volatile long clusterStateVersion;
        private volatile ShardRouting shardRouting;
        private volatile RecoveryState recoveryState;
        private volatile Set<String> activeAllocationIds;
@@ -341,17 +346,12 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
        }

        @Override
-       public ShardRouting routingEntry() {
-           return shardRouting;
-       }
-
-       @Override
-       public IndexShardState state() {
-           return null;
-       }
-
-       @Override
-       public void updateRoutingEntry(ShardRouting shardRouting) throws IOException {
+       public void updateShardState(ShardRouting shardRouting,
+                                    long newPrimaryTerm,
+                                    CheckedBiConsumer<IndexShard, ActionListener<ResyncTask>, IOException> primaryReplicaSyncer,
+                                    long applyingClusterStateVersion,
+                                    Set<String> activeAllocationIds,
+                                    Set<String> initializingAllocationIds) throws IOException {
            failRandomly();
            assertThat(this.shardId(), equalTo(shardRouting.shardId()));
            assertTrue("current: " + this.shardRouting + ", got: " + shardRouting, this.shardRouting.isSameAllocation(shardRouting));
@@ -360,17 +360,22 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
                shardRouting.active());
            }
            this.shardRouting = shardRouting;
+           if (shardRouting.primary()) {
+               term = newPrimaryTerm;
+               this.clusterStateVersion = applyingClusterStateVersion;
+               this.activeAllocationIds = activeAllocationIds;
+               this.initializingAllocationIds = initializingAllocationIds;
+           }
        }

        @Override
-       public void updatePrimaryTerm(long primaryTerm) {
-           term = primaryTerm;
+       public ShardRouting routingEntry() {
+           return shardRouting;
        }

        @Override
-       public void updateAllocationIdsFromMaster(Set<String> activeAllocationIds, Set<String> initializingAllocationIds) {
-           this.activeAllocationIds = activeAllocationIds;
-           this.initializingAllocationIds = initializingAllocationIds;
+       public IndexShardState state() {
+           return null;
        }

        public void updateTerm(long newTerm) {
diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
index adfc6609d8f..a356693213f 100644
--- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
@@ -48,6 +48,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.PrimaryReplicaSyncer;
 import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.threadpool.TestThreadPool;
@@ -407,6 +408,7 @@ public class
IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService(settings, threadPool, transportService, null, clusterService); final ShardStateAction shardStateAction = mock(ShardStateAction.class); + final PrimaryReplicaSyncer primaryReplicaSyncer = mock(PrimaryReplicaSyncer.class); return new IndicesClusterStateService( settings, indicesService, @@ -420,7 +422,8 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice null, null, null, - null); + null, + primaryReplicaSyncer); } private class RecordingIndicesService extends MockIndicesService { diff --git a/core/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsIT.java b/core/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsIT.java index 31e3ca82326..506cdc812fc 100644 --- a/core/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsIT.java @@ -18,15 +18,20 @@ */ package org.elasticsearch.indices.exists.types; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE; @@ -37,10 +42,16 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBloc import static org.hamcrest.Matchers.equalTo; public class TypesExistsIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + public void testSimple() throws Exception { Client client = client(); CreateIndexResponse response1 = client.admin().indices().prepareCreate("test1") - .setSettings("index.mapping.single_type", false) + .setSettings("index.version.created", Version.V_5_6_0.id) .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject()) .addMapping("type2", jsonBuilder().startObject().startObject("type2").endObject().endObject()) .execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java index 57f3ec29e32..a1faa4d5eeb 100644 --- a/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java @@ -19,16 +19,21 @@ package org.elasticsearch.indices.mapping; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.plugins.Plugin; import 
org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.hamcrest.Matchers; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -48,6 +53,11 @@ import static org.hamcrest.Matchers.nullValue; public class SimpleGetFieldMappingsIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + public void testGetMappingsWhereThereAreNone() { createIndex("index"); GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings().get(); @@ -64,14 +74,65 @@ public class SimpleGetFieldMappingsIT extends ESIntegTestCase { .endObject().endObject().endObject(); } - public void testSimpleGetFieldMappings() throws Exception { + public void testGetFieldMappings() throws Exception { assertAcked(prepareCreate("indexa") - .setSettings("index.mapping.single_type", false) + .addMapping("typeA", getMappingForType("typeA"))); + assertAcked(client().admin().indices().prepareCreate("indexb") + .addMapping("typeB", getMappingForType("typeB"))); + + + // Get mappings by full name + GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("indexa").setTypes("typeA").setFields("field1", "obj.subfield").get(); + assertThat(response.fieldMappings("indexa", "typeA", "field1").fullName(), equalTo("field1")); + assertThat(response.fieldMappings("indexa", "typeA", "field1").sourceAsMap(), hasKey("field1")); + assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield")); + assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield")); + assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue()); + + // Get mappings by name + response = client().admin().indices().prepareGetFieldMappings("indexa").setTypes("typeA").setFields("field1", "obj.subfield").get(); + assertThat(response.fieldMappings("indexa", "typeA", "field1").fullName(), equalTo("field1")); + assertThat(response.fieldMappings("indexa", "typeA", "field1").sourceAsMap(), hasKey("field1")); + assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield")); + assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield")); + assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue()); + assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue()); + + // get mappings by name across multiple indices + response = client().admin().indices().prepareGetFieldMappings().setTypes("typeA").setFields("obj.subfield").get(); + assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield")); + assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield")); + assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield"), nullValue()); + assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield"), nullValue()); + + // get mappings by name across multiple types + response = client().admin().indices().prepareGetFieldMappings("indexa").setFields("obj.subfield").get(); + assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield")); + assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), 
hasKey("subfield")); + assertThat(response.fieldMappings("indexa", "typeA", "field1"), nullValue()); + assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield"), nullValue()); + assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue()); + + // get mappings by name across multiple types & indices + response = client().admin().indices().prepareGetFieldMappings().setFields("obj.subfield").get(); + assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield")); + assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield")); + assertThat(response.fieldMappings("indexa", "typeA", "field1"), nullValue()); + assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue()); + assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield").fullName(), equalTo("obj.subfield")); + assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield").sourceAsMap(), hasKey("subfield")); + assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue()); + } + + public void testGetFieldMappingsMultiType() throws Exception { + assertTrue("remove this multi type test", Version.CURRENT.before(Version.fromString("7.0.0"))); + assertAcked(prepareCreate("indexa") + .setSettings("index.version.created", Version.V_5_6_0.id) .addMapping("typeA", getMappingForType("typeA")) .addMapping("typeB", getMappingForType("typeB"))); assertAcked(client().admin().indices().prepareCreate("indexb") - .setSettings("index.mapping.single_type", false) + .setSettings("index.version.created", Version.V_5_6_0.id) .addMapping("typeA", getMappingForType("typeA")) .addMapping("typeB", getMappingForType("typeB"))); @@ -186,15 +247,14 @@ public class SimpleGetFieldMappingsIT extends ESIntegTestCase { public void testGetFieldMappingsWithBlocks() throws Exception { assertAcked(prepareCreate("test") - .setSettings("index.mapping.single_type", false) - .addMapping("typeA", getMappingForType("typeA")) - .addMapping("typeB", getMappingForType("typeB"))); + .addMapping("doc", getMappingForType("doc"))); for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { try { enableIndexBlock("test", block); - GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("test").setTypes("typeA").setFields("field1", "obj.subfield").get(); - assertThat(response.fieldMappings("test", "typeA", "field1").fullName(), equalTo("field1")); + GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("test").setTypes("doc") + .setFields("field1", "obj.subfield").get(); + assertThat(response.fieldMappings("test", "doc", "field1").fullName(), equalTo("field1")); } finally { disableIndexBlock("test", block); } diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java index 89b0d48ef22..d60e93d80c4 100644 --- a/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java @@ -19,15 +19,20 @@ package org.elasticsearch.indices.mapping; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.common.Priority; import org.elasticsearch.common.xcontent.XContentBuilder; 
+import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
+import org.elasticsearch.test.InternalSettingsPlugin;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_METADATA_BLOCK;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_METADATA;
@@ -41,6 +46,12 @@ import static org.hamcrest.Matchers.notNullValue;
 @ClusterScope(randomDynamicTemplates = false)
 public class SimpleGetMappingsIT extends ESIntegTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Collections.singleton(InternalSettingsPlugin.class);
+    }
+
     public void testGetMappingsWhereThereAreNone() {
         createIndex("index");
         GetMappingsResponse response = client().admin().indices().prepareGetMappings().execute().actionGet();
@@ -56,14 +67,14 @@ public class SimpleGetMappingsIT extends ESIntegTestCase {
     public void testSimpleGetMappings() throws Exception {
         client().admin().indices().prepareCreate("indexa")
-                .setSettings("index.mapping.single_type", false)
+                .setSettings("index.version.created", Version.V_5_6_0.id)
                 .addMapping("typeA", getMappingForType("typeA"))
                 .addMapping("typeB", getMappingForType("typeB"))
                 .addMapping("Atype", getMappingForType("Atype"))
                 .addMapping("Btype", getMappingForType("Btype"))
                 .execute().actionGet();
         client().admin().indices().prepareCreate("indexb")
-                .setSettings("index.mapping.single_type", false)
+                .setSettings("index.version.created", Version.V_5_6_0.id)
                 .addMapping("typeA", getMappingForType("typeA"))
                 .addMapping("typeB", getMappingForType("typeB"))
                 .addMapping("Atype", getMappingForType("Atype"))
@@ -145,9 +156,7 @@ public class SimpleGetMappingsIT extends ESIntegTestCase {
     public void testGetMappingsWithBlocks() throws IOException {
         client().admin().indices().prepareCreate("test")
-                .setSettings("index.mapping.single_type", false)
-                .addMapping("typeA", getMappingForType("typeA"))
-                .addMapping("typeB", getMappingForType("typeB"))
+                .addMapping("doc", getMappingForType("doc"))
                 .execute().actionGet();
         ensureGreen();
@@ -156,7 +165,7 @@ public class SimpleGetMappingsIT extends ESIntegTestCase {
                 enableIndexBlock("test", block);
                 GetMappingsResponse response = client().admin().indices().prepareGetMappings().execute().actionGet();
                 assertThat(response.mappings().size(), equalTo(1));
-                assertThat(response.mappings().get("test").size(), equalTo(2));
+                assertThat(response.mappings().get("test").size(), equalTo(1));
             } finally {
                 disableIndexBlock("test", block);
             }
diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
index d41ef16d771..c4e66caa799 100644
--- a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
@@ -19,6 +19,7 @@ package org.elasticsearch.indices.mapping;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
 import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
@@ -33,13 +34,17 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import
org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.InternalSettingsPlugin; import org.hamcrest.Matchers; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.CyclicBarrier; @@ -62,6 +67,11 @@ import static org.hamcrest.Matchers.not; @ClusterScope(randomDynamicTemplates = false) public class UpdateMappingIntegrationIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + public void testDynamicUpdates() throws Exception { client().admin().indices().prepareCreate("test") .setSettings( @@ -69,7 +79,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase { .put("index.number_of_shards", 1) .put("index.number_of_replicas", 0) .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE) - .put("index.mapping.single_type", false) + .put("index.version.created", Version.V_5_6_0) // for multiple types ).execute().actionGet(); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); @@ -342,8 +352,9 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase { } public void testUpdateMappingOnAllTypes() throws IOException { + assertTrue("remove this multi type test", Version.CURRENT.before(Version.fromString("7.0.0"))); assertAcked(prepareCreate("index") - .setSettings("index.mapping.single_type", false) + .setSettings("index.version.created", Version.V_5_6_0.id) .addMapping("type1", "f", "type=keyword").addMapping("type2", "f", "type=keyword")); assertAcked(client().admin().indices().preparePutMapping("index") diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java index 484f6e5db76..80001ed16ae 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -381,16 +381,13 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { /** Issues a cache clear and waits 30 seconds for the field data breaker to be cleared */ public void clearFieldData() throws Exception { client().admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet(); - assertBusy(new Runnable() { - @Override - public void run() { - NodesStatsResponse resp = client().admin().cluster().prepareNodesStats() - .clear().setBreaker(true).get(new TimeValue(15, TimeUnit.SECONDS)); - for (NodeStats nStats : resp.getNodes()) { - assertThat("fielddata breaker never reset back to 0", - nStats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), - equalTo(0L)); - } + assertBusy(() -> { + NodesStatsResponse resp = client().admin().cluster().prepareNodesStats() + .clear().setBreaker(true).get(new TimeValue(15, TimeUnit.SECONDS)); + for (NodeStats nStats : resp.getNodes()) { + assertThat("fielddata breaker never reset back to 0", + nStats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), + equalTo(0L)); } }, 30, TimeUnit.SECONDS); 
} diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 7542545bc3a..cf1449fecd6 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -269,17 +269,13 @@ public class IndexRecoveryIT extends ESIntegTestCase { logger.info("--> waiting for recovery to start both on source and target"); final Index index = resolveIndex(INDEX_NAME); - assertBusy(new Runnable() { - @Override - public void run() { - - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA); - assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsSource(), - equalTo(1)); - indicesService = internalCluster().getInstance(IndicesService.class, nodeB); - assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsTarget(), - equalTo(1)); - } + assertBusy(() -> { + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA); + assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsSource(), + equalTo(1)); + indicesService = internalCluster().getInstance(IndicesService.class, nodeB); + assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsTarget(), + equalTo(1)); }); logger.info("--> request recoveries"); @@ -318,19 +314,16 @@ public class IndexRecoveryIT extends ESIntegTestCase { logger.info("--> checking throttling increases"); final long finalNodeAThrottling = nodeAThrottling; final long finalNodeBThrottling = nodeBThrottling; - assertBusy(new Runnable() { - @Override - public void run() { - NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get(); - assertThat(statsResponse.getNodes(), hasSize(2)); - for (NodeStats nodeStats : statsResponse.getNodes()) { - final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats(); - if (nodeStats.getNode().getName().equals(nodeA)) { - assertThat("node A throttling should increase", recoveryStats.throttleTime().millis(), greaterThan(finalNodeAThrottling)); - } - if (nodeStats.getNode().getName().equals(nodeB)) { - assertThat("node B throttling should increase", recoveryStats.throttleTime().millis(), greaterThan(finalNodeBThrottling)); - } + assertBusy(() -> { + NodesStatsResponse statsResponse1 = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get(); + assertThat(statsResponse1.getNodes(), hasSize(2)); + for (NodeStats nodeStats : statsResponse1.getNodes()) { + final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats(); + if (nodeStats.getNode().getName().equals(nodeA)) { + assertThat("node A throttling should increase", recoveryStats.throttleTime().millis(), greaterThan(finalNodeAThrottling)); + } + if (nodeStats.getNode().getName().equals(nodeB)) { + assertThat("node B throttling should increase", recoveryStats.throttleTime().millis(), greaterThan(finalNodeBThrottling)); } } }); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java new file mode 100644 index 00000000000..0e1f37c2878 --- /dev/null +++ 
b/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.recovery; + +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; + +public class PeerRecoverySourceServiceTests extends IndexShardTestCase { + + public void testDuplicateRecoveries() throws IOException { + IndexShard primary = newStartedShard(true); + PeerRecoverySourceService peerRecoverySourceService = new PeerRecoverySourceService(Settings.EMPTY, + mock(TransportService.class), mock(IndicesService.class), + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + mock(ClusterService.class)); + StartRecoveryRequest startRecoveryRequest = new StartRecoveryRequest(primary.shardId(), randomAlphaOfLength(10), + getFakeDiscoNode("source"), getFakeDiscoNode("target"), null, randomBoolean(), randomLong(), randomLong()); + RecoverySourceHandler handler = peerRecoverySourceService.ongoingRecoveries.addNewRecovery(startRecoveryRequest, primary); + DelayRecoveryException delayRecoveryException = expectThrows(DelayRecoveryException.class, + () -> peerRecoverySourceService.ongoingRecoveries.addNewRecovery(startRecoveryRequest, primary)); + assertThat(delayRecoveryException.getMessage(), containsString("recovery with same target already registered")); + peerRecoverySourceService.ongoingRecoveries.remove(primary, handler); + // re-adding after removing previous attempt works + handler = peerRecoverySourceService.ongoingRecoveries.addNewRecovery(startRecoveryRequest, primary); + peerRecoverySourceService.ongoingRecoveries.remove(primary, handler); + closeShards(primary); + } +} diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index a2e67858584..f8c971c4405 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.bytes.BytesArray; import 
org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;
-import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.mapper.SourceToParse;
 import org.elasticsearch.index.seqno.SequenceNumbersService;
 import org.elasticsearch.index.shard.IndexShard;
@@ -59,10 +58,10 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
         final String index = replica.shardId().getIndexName();
         long seqNo = 0;
         for (int i = 0; i < docs; i++) {
-            Engine.Index indexOp = replica.prepareIndexOnReplica(
+            replica.applyIndexOperationOnReplica(seqNo++, replica.getPrimaryTerm(), 1, VersionType.EXTERNAL,
+                IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
                 SourceToParse.source(index, "type", "doc_" + i, new BytesArray("{}"), XContentType.JSON),
-                seqNo++, replica.getPrimaryTerm(), 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
-            replica.index(indexOp);
+                update -> {});
             if (rarely()) {
                 // insert a gap
                 seqNo++;
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
index 09a787ce0d3..5532ad040f2 100644
--- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
@@ -35,6 +35,7 @@ import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.bytes.BytesArray;
@@ -76,6 +77,7 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.Supplier;
@@ -377,6 +379,11 @@ public class RecoverySourceHandlerTests extends ESTestCase {
         when(shard.state()).thenReturn(IndexShardState.RELOCATED);
         when(shard.acquireIndexCommit(anyBoolean())).thenReturn(mock(Engine.IndexCommitRef.class));
         final AtomicBoolean phase1Called = new AtomicBoolean();
+//        final Engine.IndexCommitRef indexCommitRef = mock(Engine.IndexCommitRef.class);
+//        when(shard.acquireIndexCommit(anyBoolean())).thenReturn(indexCommitRef);
+//        final IndexCommit indexCommit = mock(IndexCommit.class);
+//        when(indexCommitRef.getIndexCommit()).thenReturn(indexCommit);
+//        when(indexCommit.getUserData()).thenReturn(Collections.emptyMap());
         final AtomicBoolean prepareTargetForTranslogCalled = new AtomicBoolean();
         final AtomicBoolean phase2Called = new AtomicBoolean();
         final RecoverySourceHandler handler = new RecoverySourceHandler(
@@ -394,7 +401,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
             }

             @Override
-            public void phase1(final IndexCommit snapshot, final Translog.View translogView) {
+            public void phase1(final IndexCommit snapshot, final Translog.View translogView, final long startSeqNo) {
                 phase1Called.set(true);
             }
@@ -448,9 +455,20 @@ public class RecoverySourceHandlerTests extends ESTestCase {
             relocated.set(true);
             assertTrue(recoveriesDelayed.get());
             return null;
-        }).when(shard).relocated(any(String.class));
+        }).when(shard).relocated(any(String.class), any(Consumer.class));
when(shard.acquireIndexCommit(anyBoolean())).thenReturn(mock(Engine.IndexCommitRef.class)); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + final ActionListener listener = (ActionListener)invocationOnMock.getArguments()[0]; + listener.onResponse(() -> {}); + return null; + }).when(shard).acquirePrimaryOperationPermit(any(ActionListener.class), any(String.class)); +// final Engine.IndexCommitRef indexCommitRef = mock(Engine.IndexCommitRef.class); +// when(shard.acquireIndexCommit(anyBoolean())).thenReturn(indexCommitRef); +// final IndexCommit indexCommit = mock(IndexCommit.class); +// when(indexCommitRef.getIndexCommit()).thenReturn(indexCommit); +// when(indexCommit.getUserData()).thenReturn(Collections.emptyMap()); final Supplier currentClusterStateVersionSupplier = () -> { assertFalse(ensureClusterStateVersionCalled.get()); assertTrue(recoveriesDelayed.get()); @@ -487,7 +505,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { } @Override - public void phase1(final IndexCommit snapshot, final Translog.View translogView) { + public void phase1(final IndexCommit snapshot, final Translog.View translogView, final long startSeqNo) { phase1Called.set(true); } diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java index 4f893c946ec..7a65541cb5e 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java @@ -157,12 +157,7 @@ public class RecoveryTargetTests extends ESTestCase { Timer lastRead = streamer.serializeDeserialize(); final long time = lastRead.time(); assertThat(time, lessThanOrEqualTo(timer.time())); - assertBusy(new Runnable() { - @Override - public void run() { - assertThat("timer timer should progress compared to captured one ", time, lessThan(timer.time())); - } - }); + assertBusy(() -> assertThat("timer timer should progress compared to captured one ", time, lessThan(timer.time()))); assertThat("captured time shouldn't change", lastRead.time(), equalTo(time)); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java new file mode 100644 index 00000000000..2a95bf33908 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices.recovery; + +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.replication.ESIndexLevelReplicationTestCase; +import org.elasticsearch.index.replication.RecoveryDuringReplicationTests; +import org.elasticsearch.index.shard.IndexShard; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Future; + +import static org.hamcrest.Matchers.equalTo; + +public class RecoveryTests extends ESIndexLevelReplicationTestCase { + + public void testTranslogHistoryTransferred() throws Exception { + try (ReplicationGroup shards = createGroup(0)) { + shards.startPrimary(); + int docs = shards.indexDocs(10); + shards.getPrimary().getTranslog().rollGeneration(); + shards.flush(); + if (randomBoolean()) { + docs += shards.indexDocs(10); + } + shards.addReplica(); + shards.startAll(); + final IndexShard replica = shards.getReplicas().get(0); + assertThat(replica.getTranslog().totalOperations(), equalTo(docs)); + } + } + + + public void testRetentionPolicyChangeDuringRecovery() throws Exception { + try (ReplicationGroup shards = createGroup(0)) { + shards.startPrimary(); + shards.indexDocs(10); + shards.getPrimary().getTranslog().rollGeneration(); + shards.flush(); + shards.indexDocs(10); + final IndexShard replica = shards.addReplica(); + final CountDownLatch recoveryBlocked = new CountDownLatch(1); + final CountDownLatch releaseRecovery = new CountDownLatch(1); + Future future = shards.asyncRecoverReplica(replica, + (indexShard, node) -> new RecoveryDuringReplicationTests.BlockingTarget(RecoveryState.Stage.TRANSLOG, + recoveryBlocked, releaseRecovery, indexShard, node, recoveryListener, logger)); + recoveryBlocked.await(); + IndexMetaData.Builder builder = IndexMetaData.builder(replica.indexSettings().getIndexMetaData()); + builder.settings(Settings.builder().put(replica.indexSettings().getSettings()) + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") + // force a roll and flush + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "100b") + ); + replica.indexSettings().updateIndexMetaData(builder.build()); + replica.onSettingsChanged(); + releaseRecovery.countDown(); + future.get(); + // rolling/flushing is async + assertBusy(() -> assertThat(replica.getTranslog().totalOperations(), equalTo(0))); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index c38c20e0c25..805fcb4d501 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.state; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; @@ -266,23 +267,20 @@ public class RareClusterStateIT extends ESIntegTestCase { } }); // ...and wait for mappings to be available on master - assertBusy(new Runnable() { - @Override - public void run() { - ImmutableOpenMap indexMappings = client().admin().indices().prepareGetMappings("index").get().getMappings().get("index"); - assertNotNull(indexMappings); - 
MappingMetaData typeMappings = indexMappings.get("type"); - assertNotNull(typeMappings); - Object properties; - try { - properties = typeMappings.getSourceAsMap().get("properties"); - } catch (IOException e) { - throw new AssertionError(e); - } - assertNotNull(properties); - Object fieldMapping = ((Map) properties).get("field"); - assertNotNull(fieldMapping); + assertBusy(() -> { + ImmutableOpenMap indexMappings = client().admin().indices().prepareGetMappings("index").get().getMappings().get("index"); + assertNotNull(indexMappings); + MappingMetaData typeMappings = indexMappings.get("type"); + assertNotNull(typeMappings); + Object properties; + try { + properties = typeMappings.getSourceAsMap().get("properties"); + } catch (ElasticsearchParseException e) { + throw new AssertionError(e); } + assertNotNull(properties); + Object fieldMapping = ((Map) properties).get("field"); + assertNotNull(fieldMapping); }); final AtomicReference docIndexResponse = new AtomicReference<>(); @@ -307,17 +305,14 @@ public class RareClusterStateIT extends ESIntegTestCase { // Now make sure the indexing request finishes successfully disruption.stopDisrupting(); - assertBusy(new Runnable() { - @Override - public void run() { - assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class)); - PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get(); - assertTrue(resp.isAcknowledged()); - assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class)); - IndexResponse docResp = (IndexResponse) docIndexResponse.get(); - assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), - 1, docResp.getShardInfo().getTotal()); - } + assertBusy(() -> { + assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class)); + PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get(); + assertTrue(resp.isAcknowledged()); + assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class)); + IndexResponse docResp = (IndexResponse) docIndexResponse.get(); + assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), + 1, docResp.getShardInfo().getTotal()); }); } @@ -387,17 +382,14 @@ public class RareClusterStateIT extends ESIntegTestCase { }); final Index index = resolveIndex("index"); // Wait for mappings to be available on master - assertBusy(new Runnable() { - @Override - public void run() { - final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master); - final IndexService indexService = indicesService.indexServiceSafe(index); - assertNotNull(indexService); - final MapperService mapperService = indexService.mapperService(); - DocumentMapper mapper = mapperService.documentMapper("type"); - assertNotNull(mapper); - assertNotNull(mapper.mappers().getMapper("field")); - } + assertBusy(() -> { + final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master); + final IndexService indexService = indicesService.indexServiceSafe(index); + assertNotNull(indexService); + final MapperService mapperService = indexService.mapperService(); + DocumentMapper mapper = mapperService.documentMapper("type"); + assertNotNull(mapper); + assertNotNull(mapper.mappers().getMapper("field")); }); final AtomicReference docIndexResponse = new AtomicReference<>(); @@ -414,12 +406,7 @@ public class RareClusterStateIT extends ESIntegTestCase { }); // Wait for document to be indexed on primary - assertBusy(new Runnable() { - @Override - public void run() { - assertTrue(client().prepareGet("index", "type", 
"1").setPreference("_primary").get().isExists()); - } - }); + assertBusy(() -> assertTrue(client().prepareGet("index", "type", "1").setPreference("_primary").get().isExists())); // The mappings have not been propagated to the replica yet as a consequence the document count not be indexed // We wait on purpose to make sure that the document is not indexed because the shard operation is stalled @@ -430,17 +417,14 @@ public class RareClusterStateIT extends ESIntegTestCase { // Now make sure the indexing request finishes successfully disruption.stopDisrupting(); - assertBusy(new Runnable() { - @Override - public void run() { - assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class)); - PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get(); - assertTrue(resp.isAcknowledged()); - assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class)); - IndexResponse docResp = (IndexResponse) docIndexResponse.get(); - assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), - 2, docResp.getShardInfo().getTotal()); // both shards should have succeeded - } + assertBusy(() -> { + assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class)); + PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get(); + assertTrue(resp.isAcknowledged()); + assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class)); + IndexResponse docResp = (IndexResponse) docIndexResponse.get(); + assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), + 2, docResp.getShardInfo().getTotal()); // both shards should have succeeded }); } diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index f8cf9bd7a3c..36381233a61 100644 --- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.indices.stats; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; +import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -53,14 +54,18 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.EnumSet; import java.util.List; import java.util.Random; @@ -90,6 +95,12 @@ import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0, randomDynamicTemplates = false) @SuppressCodecs("*") // requires custom completion format public class IndexStatsIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + @Override protected Settings nodeSettings(int nodeOrdinal) { //Filter/Query cache is cleaned periodically, 
default is 60s, so make sure it runs often. Thread.sleep for 60s is bad @@ -269,12 +280,7 @@ public class IndexStatsIT extends ESIntegTestCase { } indexRandom(true, builders); refresh(); - assertBusy(new Runnable() { - @Override - public void run() { - assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0L)); - } - }); + assertBusy(() -> assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0L))); for (int i = 0; i < 10; i++) { assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get().getHits().getTotalHits(), equalTo((long) numDocs)); @@ -383,7 +389,8 @@ public class IndexStatsIT extends ESIntegTestCase { } public void testSimpleStats() throws Exception { - assertAcked(prepareCreate("test1").setSettings("index.mapping.single_type", false)); + // this test has some type stats tests that can be removed in 7.0 + assertAcked(prepareCreate("test1").setSettings("index.version.created", Version.V_5_6_0.id)); // allows for multiple types createIndex("test2"); ensureGreen(); @@ -513,7 +520,7 @@ public class IndexStatsIT extends ESIntegTestCase { } public void testMergeStats() { - assertAcked(prepareCreate("test1").setSettings("index.mapping.single_type", false)); + assertAcked(prepareCreate("test_index")); ensureGreen(); @@ -535,8 +542,7 @@ public class IndexStatsIT extends ESIntegTestCase { assertThat(stats.getTotal().getSearch(), nullValue()); for (int i = 0; i < 20; i++) { - client().prepareIndex("test1", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test1", "type2", Integer.toString(i)).setSource("field", "value").execute().actionGet(); + client().prepareIndex("test_index", "doc", Integer.toString(i)).setSource("field", "value").execute().actionGet(); client().admin().indices().prepareFlush().execute().actionGet(); } client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); @@ -549,15 +555,14 @@ public class IndexStatsIT extends ESIntegTestCase { } public void testSegmentsStats() { - assertAcked(prepareCreate("test1") - .setSettings(SETTING_NUMBER_OF_REPLICAS, between(0, 1), "index.mapping.single_type", false)); + assertAcked(prepareCreate("test_index") + .setSettings(SETTING_NUMBER_OF_REPLICAS, between(0, 1))); ensureGreen(); - NumShards test1 = getNumShards("test1"); + NumShards test1 = getNumShards("test_index"); for (int i = 0; i < 100; i++) { - index("test1", "type1", Integer.toString(i), "field", "value"); - index("test1", "type2", Integer.toString(i), "field", "value"); + index("test_index", "doc", Integer.toString(i), "field", "value"); } IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get(); @@ -575,14 +580,14 @@ public class IndexStatsIT extends ESIntegTestCase { public void testAllFlags() throws Exception { // rely on 1 replica for this tests - assertAcked(prepareCreate("test1").setSettings("index.mapping.single_type", false)); - createIndex("test2"); + assertAcked(prepareCreate("test_index")); + createIndex("test_index_2"); ensureGreen(); - client().prepareIndex("test1", "type1", Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test1", "type2", Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test2", "type", 
Integer.toString(1)).setSource("field", "value").execute().actionGet(); + client().prepareIndex("test_index", "doc", Integer.toString(1)).setSource("field", "value").execute().actionGet(); + client().prepareIndex("test_index", "doc", Integer.toString(2)).setSource("field", "value").execute().actionGet(); + client().prepareIndex("test_index_2", "type", Integer.toString(1)).setSource("field", "value").execute().actionGet(); client().admin().indices().prepareRefresh().execute().actionGet(); IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); @@ -697,14 +702,14 @@ public class IndexStatsIT extends ESIntegTestCase { } public void testMultiIndex() throws Exception { - assertAcked(prepareCreate("test1").setSettings("index.mapping.single_type", false)); + assertAcked(prepareCreate("test1")); createIndex("test2"); ensureGreen(); - client().prepareIndex("test1", "type1", Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test1", "type2", Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test2", "type", Integer.toString(1)).setSource("field", "value").execute().actionGet(); + client().prepareIndex("test1", "doc", Integer.toString(1)).setSource("field", "value").execute().actionGet(); + client().prepareIndex("test1", "doc", Integer.toString(2)).setSource("field", "value").execute().actionGet(); + client().prepareIndex("test2", "doc", Integer.toString(1)).setSource("field", "value").execute().actionGet(); refresh(); int numShards1 = getNumShards("test1").totalNumShards; @@ -737,14 +742,14 @@ public class IndexStatsIT extends ESIntegTestCase { public void testFieldDataFieldsParam() throws Exception { assertAcked(client().admin().indices().prepareCreate("test1") - .setSettings("index.mapping.single_type", false) - .addMapping("type", "bar", "type=text,fielddata=true", + .setSettings("index.version.created", Version.V_5_6_0.id) + .addMapping("doc", "bar", "type=text,fielddata=true", "baz", "type=text,fielddata=true").get()); ensureGreen(); - client().prepareIndex("test1", "bar", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); - client().prepareIndex("test1", "baz", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); + client().prepareIndex("test1", "doc", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); + client().prepareIndex("test1", "doc", Integer.toString(2)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); refresh(); client().prepareSearch("_all").addSort("bar", SortOrder.ASC).addSort("baz", SortOrder.ASC).execute().actionGet(); @@ -785,14 +790,12 @@ public class IndexStatsIT extends ESIntegTestCase { public void testCompletionFieldsParam() throws Exception { assertAcked(prepareCreate("test1") - .setSettings("index.mapping.single_type", false) .addMapping( - "bar", + "doc", "{ \"properties\": { \"bar\": { \"type\": \"text\", \"fields\": { \"completion\": { \"type\": \"completion\" }}},\"baz\": { \"type\": \"text\", \"fields\": { \"completion\": { \"type\": \"completion\" }}}}}", XContentType.JSON)); ensureGreen(); - client().prepareIndex("test1", "bar", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); - client().prepareIndex("test1", "baz", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); + client().prepareIndex("test1", "doc", 
Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); refresh(); IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index 4e1be614fa5..7f6155979c9 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -21,12 +21,10 @@ package org.elasticsearch.indices.store; import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskListener; -import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -40,7 +38,6 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.Environment; @@ -378,12 +375,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // allocation filtering may not have immediate effect // TODO: we should add an easier to do this. It's too much of a song and dance.. 
Index index = resolveIndex("test"); - assertBusy(new Runnable() { - @Override - public void run() { - assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex(index)); - } - }); + assertBusy(() -> assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex(index))); // wait for 4 active shards - we should have lost one shard assertFalse(client().admin().cluster().prepareHealth().setWaitForActiveShards(4).get().isTimedOut()); diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 017026844f1..901e2b37bf8 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.indices.template; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; @@ -37,13 +38,16 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.InvalidAliasNameException; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.junit.After; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -67,6 +71,11 @@ import static org.hamcrest.Matchers.nullValue; public class SimpleIndexTemplateIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + @After public void cleanupTemplates() { client().admin().indices().prepareDeleteTemplate("*").get(); @@ -383,7 +392,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .get(); assertAcked(prepareCreate("test_index") - .setSettings("index.mapping.single_type", false) + .setSettings("index.version.created", Version.V_5_6_0.id) // allow for multiple version .addMapping("type1").addMapping("type2").addMapping("typeX").addMapping("typeY").addMapping("typeZ")); ensureGreen(); @@ -431,8 +440,8 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { " \"aliases\" : {\n" + " \"my_alias\" : {\n" + " \"filter\" : {\n" + - " \"type\" : {\n" + - " \"value\" : \"type2\"\n" + + " \"term\" : {\n" + + " \"field\" : \"value2\"\n" + " }\n" + " }\n" + " }\n" + @@ -441,16 +450,15 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { assertAcked(prepareCreate("test_index") - .setSettings("index.mapping.single_type", false) - .addMapping("type1").addMapping("type2")); + .addMapping("doc")); ensureGreen(); GetAliasesResponse getAliasesResponse = client().admin().indices().prepareGetAliases().setIndices("test_index").get(); assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(1)); - client().prepareIndex("test_index", "type1", "1").setSource("field", "value1").get(); - client().prepareIndex("test_index", "type2", "2").setSource("field", "value2").get(); + 
client().prepareIndex("test_index", "doc", "1").setSource("field", "value1").get(); + client().prepareIndex("test_index", "doc", "2").setSource("field", "value2").get(); refresh(); SearchResponse searchResponse = client().prepareSearch("test_index").get(); @@ -458,7 +466,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { searchResponse = client().prepareSearch("my_alias").get(); assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getType(), equalTo("type2")); + assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value2")); } public void testIndexTemplateWithAliasesSource() { @@ -469,8 +477,8 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { " \"alias1\" : {},\n" + " \"alias2\" : {\n" + " \"filter\" : {\n" + - " \"type\" : {\n" + - " \"value\" : \"type2\"\n" + + " \"term\" : {\n" + + " \"field\" : \"value2\"\n" + " }\n" + " }\n" + " },\n" + @@ -478,16 +486,15 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { " }\n").get(); assertAcked(prepareCreate("test_index") - .setSettings("index.mapping.single_type", false) - .addMapping("type1").addMapping("type2")); + .addMapping("doc")); ensureGreen(); GetAliasesResponse getAliasesResponse = client().admin().indices().prepareGetAliases().setIndices("test_index").get(); assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(3)); - client().prepareIndex("test_index", "type1", "1").setSource("field", "value1").get(); - client().prepareIndex("test_index", "type2", "2").setSource("field", "value2").get(); + client().prepareIndex("test_index", "doc", "1").setSource("field", "value1").get(); + client().prepareIndex("test_index", "doc", "2").setSource("field", "value2").get(); refresh(); SearchResponse searchResponse = client().prepareSearch("test_index").get(); @@ -498,7 +505,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { searchResponse = client().prepareSearch("alias2").get(); assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getType(), equalTo("type2")); + assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value2")); } public void testDuplicateAlias() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java b/core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java index bf23da1868d..1ce6ed5779f 100644 --- a/core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java +++ b/core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java @@ -181,7 +181,7 @@ public class InternalSettingsPreparerTests extends ESTestCase { public void testDefaultPropertiesDoNothing() throws Exception { Map props = Collections.singletonMap("default.setting", "foo"); - Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, null, props); + Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, null, props, null); assertEquals("foo", env.settings().get("default.setting")); assertNull(env.settings().get("setting")); } diff --git a/core/src/test/java/org/elasticsearch/node/NodeTests.java b/core/src/test/java/org/elasticsearch/node/NodeTests.java index e99c7b90631..6590def3a72 100644 --- a/core/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/core/src/test/java/org/elasticsearch/node/NodeTests.java @@ -57,14 +57,8 @@ import static 
org.mockito.Mockito.verifyNoMoreInteractions; public class NodeTests extends ESTestCase { public void testNodeName() throws IOException { - final Path tempDir = createTempDir(); final String name = randomBoolean() ? randomAlphaOfLength(10) : null; - Settings.Builder settings = Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong())) - .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) - .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put("transport.type", "mock-socket-network") - .put(Node.NODE_DATA_SETTING.getKey(), true); + Settings.Builder settings = baseSettings(); if (name != null) { settings.put(Node.NODE_NAME_SETTING.getKey(), name); } @@ -97,14 +91,8 @@ public class NodeTests extends ESTestCase { } public void testLoadPluginBootstrapChecks() throws IOException { - final Path tempDir = createTempDir(); final String name = randomBoolean() ? randomAlphaOfLength(10) : null; - Settings.Builder settings = Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong())) - .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) - .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put("transport.type", "mock-socket-network") - .put(Node.NODE_DATA_SETTING.getKey(), true); + Settings.Builder settings = baseSettings(); if (name != null) { settings.put(Node.NODE_NAME_SETTING.getKey(), name); } @@ -175,79 +163,13 @@ public class NodeTests extends ESTestCase { } } - public void testDefaultPathDataSet() throws IOException { - final Path zero = createTempDir().toAbsolutePath(); - final Path one = createTempDir().toAbsolutePath(); - final Path defaultPathData = createTempDir().toAbsolutePath(); - final Settings settings = Settings.builder() - .put("path.home", "/home") - .put("path.data.0", zero) - .put("path.data.1", one) - .put("default.path.data", defaultPathData) - .build(); - try (NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings))) { - final Path defaultPathDataWithNodesAndId = defaultPathData.resolve("nodes/0"); - Files.createDirectories(defaultPathDataWithNodesAndId); - final NodeEnvironment.NodePath defaultNodePath = new NodeEnvironment.NodePath(defaultPathDataWithNodesAndId); - final boolean indexExists = randomBoolean(); - final List indices; - if (indexExists) { - indices = IntStream.range(0, randomIntBetween(1, 3)).mapToObj(i -> UUIDs.randomBase64UUID()).collect(Collectors.toList()); - for (final String index : indices) { - Files.createDirectories(defaultNodePath.indicesPath.resolve(index)); - } - } else { - indices = Collections.emptyList(); - } - final Logger mock = mock(Logger.class); - if (indexExists) { - final IllegalStateException e = expectThrows( - IllegalStateException.class, - () -> Node.checkForIndexDataInDefaultPathData(settings, nodeEnv, mock)); - final String message = String.format( - Locale.ROOT, - "detected index data in default.path.data [%s] where there should not be any; check the logs for details", - defaultPathData); - assertThat(e, hasToString(containsString(message))); - verify(mock) - .error("detected index data in default.path.data [{}] where there should not be any", defaultNodePath.indicesPath); - for (final String index : indices) { - verify(mock).info( - "index folder [{}] in default.path.data [{}] must be moved to any of {}", - index, - defaultNodePath.indicesPath, - Arrays.stream(nodeEnv.nodePaths()).map(np -> np.indicesPath).collect(Collectors.toList())); - } - 
verifyNoMoreInteractions(mock); - } else { - Node.checkForIndexDataInDefaultPathData(settings, nodeEnv, mock); - verifyNoMoreInteractions(mock); - } - } - } - - public void testDefaultPathDataNotSet() throws IOException { - final Path zero = createTempDir().toAbsolutePath(); - final Path one = createTempDir().toAbsolutePath(); - final Settings settings = Settings.builder() - .put("path.home", "/home") - .put("path.data.0", zero) - .put("path.data.1", one) - .build(); - try (NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings))) { - final Logger mock = mock(Logger.class); - Node.checkForIndexDataInDefaultPathData(settings, nodeEnv, mock); - verifyNoMoreInteractions(mock); - } - } - private static Settings.Builder baseSettings() { final Path tempDir = createTempDir(); return Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong())) .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put("transport.type", "mock-socket-network") + .put(NetworkModule.TRANSPORT_TYPE_KEY, "mock-socket-network") .put(Node.NODE_DATA_SETTING.getKey(), true); } diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index e980081479b..c3fd0b19f73 100644 --- a/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Locale; @@ -54,7 +55,7 @@ public class PluginsServiceTests extends ESTestCase { public static class FilterablePlugin extends Plugin implements ScriptPlugin {} static PluginsService newPluginsService(Settings settings, Class... 
classpathPlugins) { - return new PluginsService(settings, null, new Environment(settings).pluginsFile(), Arrays.asList(classpathPlugins)); + return new PluginsService(settings, null, null, new Environment(settings).pluginsFile(), Arrays.asList(classpathPlugins)); } public void testAdditionalSettings() { @@ -151,4 +152,90 @@ public class PluginsServiceTests extends ESTestCase { assertThat(e, hasToString(containsString(expected))); } + public void testLoadPluginWithNoPublicConstructor() { + class NoPublicConstructorPlugin extends Plugin { + + private NoPublicConstructorPlugin() { + + } + + } + + final Path home = createTempDir(); + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build(); + final IllegalStateException e = + expectThrows(IllegalStateException.class, () -> newPluginsService(settings, NoPublicConstructorPlugin.class)); + assertThat(e, hasToString(containsString("no public constructor"))); + } + + public void testLoadPluginWithMultiplePublicConstructors() { + class MultiplePublicConstructorsPlugin extends Plugin { + + @SuppressWarnings("unused") + public MultiplePublicConstructorsPlugin() { + + } + + @SuppressWarnings("unused") + public MultiplePublicConstructorsPlugin(final Settings settings) { + + } + + } + + final Path home = createTempDir(); + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build(); + final IllegalStateException e = + expectThrows(IllegalStateException.class, () -> newPluginsService(settings, MultiplePublicConstructorsPlugin.class)); + assertThat(e, hasToString(containsString("no unique public constructor"))); + } + + public void testLoadPluginWithNoPublicConstructorOfCorrectSignature() { + class TooManyParametersPlugin extends Plugin { + + @SuppressWarnings("unused") + public TooManyParametersPlugin(Settings settings, Path configPath, Object object) { + + } + + } + + class TwoParametersFirstIncorrectType extends Plugin { + + @SuppressWarnings("unused") + public TwoParametersFirstIncorrectType(Object object, Path configPath) { + + } + } + + class TwoParametersSecondIncorrectType extends Plugin { + + @SuppressWarnings("unused") + public TwoParametersSecondIncorrectType(Settings settings, Object object) { + + } + + } + + class OneParameterIncorrectType extends Plugin { + + @SuppressWarnings("unused") + public OneParameterIncorrectType(Object object) { + + } + } + + final Collection> classes = Arrays.asList( + TooManyParametersPlugin.class, + TwoParametersFirstIncorrectType.class, + TwoParametersSecondIncorrectType.class, + OneParameterIncorrectType.class); + for (Class pluginClass : classes) { + final Path home = createTempDir(); + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), home).build(); + final IllegalStateException e = expectThrows(IllegalStateException.class, () -> newPluginsService(settings, pluginClass)); + assertThat(e, hasToString(containsString("no public constructor of correct signature"))); + } + } + } diff --git a/core/src/test/java/org/elasticsearch/plugins/spi/NamedXContentProviderTests.java b/core/src/test/java/org/elasticsearch/plugins/spi/NamedXContentProviderTests.java new file mode 100644 index 00000000000..3b63d88f392 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/plugins/spi/NamedXContentProviderTests.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins.spi; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; +import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.search.suggest.term.TermSuggestion; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.ServiceLoader; +import java.util.function.Predicate; + +public class NamedXContentProviderTests extends ESTestCase { + + public void testSpiFileExists() throws IOException { + String serviceFile = "/META-INF/services/" + NamedXContentProvider.class.getName(); + List implementations = new ArrayList<>(); + try (InputStream input = NamedXContentProviderTests.class.getResourceAsStream(serviceFile)) { + Streams.readAllLines(input, implementations::add); + } + + assertEquals(1, implementations.size()); + assertEquals(TestNamedXContentProvider.class.getName(), implementations.get(0)); + } + + public void testNamedXContents() { + final List namedXContents = new ArrayList<>(); + for (NamedXContentProvider service : ServiceLoader.load(NamedXContentProvider.class)) { + namedXContents.addAll(service.getNamedXContentParsers()); + } + + assertEquals(2, namedXContents.size()); + + List> predicates = new ArrayList<>(2); + predicates.add(e -> Aggregation.class.equals(e.categoryClass) && "test_aggregation".equals(e.name.getPreferredName())); + predicates.add(e -> Suggest.Suggestion.class.equals(e.categoryClass) && "test_suggestion".equals(e.name.getPreferredName())); + predicates.forEach(predicate -> assertEquals(1, namedXContents.stream().filter(predicate).count())); + } + + public static class TestNamedXContentProvider implements NamedXContentProvider { + + public TestNamedXContentProvider() { + } + + @Override + public List getNamedXContentParsers() { + return Arrays.asList( + new NamedXContentRegistry.Entry(Aggregation.class, new ParseField("test_aggregation"), + (parser, context) -> ParsedSimpleValue.fromXContent(parser, (String) context)), + new NamedXContentRegistry.Entry(Suggest.Suggestion.class, new ParseField("test_suggestion"), + (parser, context) -> TermSuggestion.fromXContent(parser, (String) context)) + ); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java b/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java index 50035e1027b..d6daf3509f5 100644 --- a/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java +++ 
b/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java @@ -54,6 +54,7 @@ public class FullRollingRestartIT extends ESIntegTestCase { return 1; } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/25420") public void testFullRollingRestart() throws Exception { Settings settings = Settings.builder().put(ZenDiscovery.JOIN_TIMEOUT_SETTING.getKey(), "30s").build(); internalCluster().startNode(settings); diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index 57249e186db..b0d25f43bd6 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -53,7 +53,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllS import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; -@TestLogging("_root:DEBUG,org.elasticsearch.index.shard:TRACE") +@TestLogging("_root:DEBUG,org.elasticsearch.index.shard:TRACE,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.index.seqno:TRACE,org.elasticsearch.indices.recovery:TRACE") public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { private final Logger logger = Loggers.getLogger(RecoveryWhileUnderLoadIT.class); @@ -341,12 +341,9 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { } private void refreshAndAssert() throws Exception { - assertBusy(new Runnable() { - @Override - public void run() { - RefreshResponse actionGet = client().admin().indices().prepareRefresh().get(); - assertAllSuccessful(actionGet); - } + assertBusy(() -> { + RefreshResponse actionGet = client().admin().indices().prepareRefresh().get(); + assertAllSuccessful(actionGet); }, 5, TimeUnit.MINUTES); } } diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index fe83847bff2..48f6fdeaedb 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -514,14 +514,6 @@ public class RelocationIT extends ESIntegTestCase { // refresh is a replication action so this forces a global checkpoint sync which is needed as these are asserted on in tear down client().admin().indices().prepareRefresh("test").get(); - /* - * We have to execute a second refresh as in the face of relocations, the relocation target is not aware of the in-sync set and so - * the first refresh would bring back the local checkpoint for any shards added to the in-sync set that the relocation target was - * not tracking. 
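The IndicesStoreIntegrationIT and RecoveryWhileUnderLoadIT hunks above both replace an anonymous Runnable with a lambda passed to assertBusy, which re-runs the assertion until it stops failing or the wait time (here 5 minutes) elapses. A minimal, self-contained sketch of that retry-until-timeout pattern; BusyAssertSketch and its assertBusy are simplified stand-ins written for illustration, not the ESTestCase helper itself:

import java.util.concurrent.TimeUnit;

public class BusyAssertSketch {

    // Simplified stand-in: rerun the assertion until it stops throwing or the timeout elapses.
    static void assertBusy(Runnable assertion, long maxWait, TimeUnit unit) throws InterruptedException {
        long deadline = System.nanoTime() + unit.toNanos(maxWait);
        AssertionError last;
        do {
            try {
                assertion.run();
                return;                   // assertion passed
            } catch (AssertionError e) {
                last = e;
                Thread.sleep(50);         // back off briefly before retrying
            }
        } while (System.nanoTime() < deadline);
        throw last;                       // timed out: surface the last failure
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();
        // Lambda form, mirroring the rewritten call sites above.
        assertBusy(() -> {
            if (System.currentTimeMillis() - start < 200) {
                throw new AssertionError("condition not met yet");
            }
        }, 5, TimeUnit.SECONDS);
        System.out.println("condition became true within the timeout");
    }
}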
- */ - // TODO: remove this after a primary context is transferred during relocation handoff - client().admin().indices().prepareRefresh("test").get(); - } class RecoveryCorruption extends MockTransportService.DelegateTransport { diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 9a81b1bcbbd..c943f98a66f 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -54,10 +54,8 @@ public class ScriptServiceTests extends ESTestCase { @Before public void setup() throws IOException { - Path genericConfigFolder = createTempDir(); baseSettings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(Environment.PATH_CONF_SETTING.getKey(), genericConfigFolder) .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 10000) .build(); Map, Object>> scripts = new HashMap<>(); diff --git a/core/src/test/java/org/elasticsearch/search/SearchHitTests.java b/core/src/test/java/org/elasticsearch/search/SearchHitTests.java index 5c7f11875e3..fb5ecb38114 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchHitTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchHitTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.text.Text; @@ -70,7 +71,7 @@ public class SearchHitTests extends ESTestCase { if (randomBoolean()) { nestedIdentity = NestedIdentityTests.createTestItem(randomIntBetween(0, 2)); } - Map fields = new HashMap<>(); + Map fields = new HashMap<>(); if (randomBoolean()) { int size = randomIntBetween(0, 10); for (int i = 0; i < size; i++) { @@ -78,10 +79,10 @@ public class SearchHitTests extends ESTestCase { XContentType.JSON); if (randomBoolean()) { String metaField = randomFrom(META_FIELDS); - fields.put(metaField, new SearchHitField(metaField, values.v1())); + fields.put(metaField, new DocumentField(metaField, values.v1())); } else { String fieldName = randomAlphaOfLengthBetween(5, 10); - fields.put(fieldName, new SearchHitField(fieldName, values.v1())); + fields.put(fieldName, new DocumentField(fieldName, values.v1())); } } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java index c6e1156bb71..f7a5b2f7f38 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java @@ -19,13 +19,18 @@ package org.elasticsearch.search.aggregations; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import 
org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.search.aggregations.Aggregation.CommonFields; import org.elasticsearch.search.aggregations.bucket.adjacency.InternalAdjacencyMatrixTests; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilterTests; import org.elasticsearch.search.aggregations.bucket.filters.InternalFiltersTests; @@ -79,9 +84,11 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Set; +import java.util.function.Predicate; import java.util.stream.Collectors; import static java.util.Collections.singletonMap; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; /** * This class tests that aggregations parsing works properly. It checks that we can parse @@ -172,11 +179,55 @@ public class AggregationsTests extends ESTestCase { } public void testFromXContent() throws IOException { + parseAndAssert(false); + } + + public void testFromXContentWithRandomFields() throws IOException { + parseAndAssert(true); + } + + /** + * Test that parsing works for a randomly created Aggregations object with a + * randomized aggregation tree. The test randomly chooses an + * {@link XContentType}, randomizes the order of the {@link XContent} fields + * and randomly sets the `humanReadable` flag when rendering the + * {@link XContent}. + * + * @param addRandomFields + * if set, this will also add random {@link XContent} fields to + * tests that the parsers are lenient to future additions to rest + * responses + */ + private void parseAndAssert(boolean addRandomFields) throws IOException { XContentType xContentType = randomFrom(XContentType.values()); final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); Aggregations aggregations = createTestInstance(); BytesReference originalBytes = toShuffledXContent(aggregations, xContentType, params, randomBoolean()); - try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + BytesReference mutated; + if (addRandomFields) { + /* + * - don't insert into the root object because it should only contain the named aggregations to test + * + * - don't insert into the "meta" object, because we pass on everything we find there + * + * - we don't want to directly insert anything random into "buckets" objects, they are used with + * "keyed" aggregations and contain named bucket objects. Any new named object on this level should + * also be a bucket and be parsed as such. + * + * - we cannot insert randomly into VALUE or VALUES objects e.g. 
in Percentiles, the keys need to be numeric there + * + * - we cannot insert into ExtendedMatrixStats "covariance" or "correlation" fields, their syntax is strict + */ + Predicate excludes = path -> (path.isEmpty() || path.endsWith("aggregations") + || path.endsWith(Aggregation.CommonFields.META.getPreferredName()) + || path.endsWith(Aggregation.CommonFields.BUCKETS.getPreferredName()) + || path.endsWith(CommonFields.VALUES.getPreferredName()) || path.endsWith("covariance") || path.endsWith("correlation") + || path.contains(CommonFields.VALUE.getPreferredName())); + mutated = insertRandomFields(xContentType, originalBytes, excludes, random()); + } else { + mutated = originalBytes; + } + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); assertEquals(Aggregations.AGGREGATIONS_FIELD, parser.currentName()); @@ -187,6 +238,22 @@ public class AggregationsTests extends ESTestCase { } } + public void testParsingExceptionOnUnknownAggregation() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + { + builder.startObject("unknownAggregation"); + builder.endObject(); + } + builder.endObject(); + BytesReference originalBytes = builder.bytes(); + try (XContentParser parser = createParser(builder.contentType().xContent(), originalBytes)) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + ParsingException ex = expectThrows(ParsingException.class, () -> Aggregations.fromXContent(parser)); + assertEquals("Could not parse aggregation keyed as [unknownAggregation]", ex.getMessage()); + } + } + public final InternalAggregations createTestInstance() { return createTestInstance(1, 0, 5); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregationTestCase.java index bc1ac5976ff..9ae55a66b25 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregationTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregationTestCase.java @@ -90,7 +90,7 @@ public abstract class InternalMultiBucketAggregationTestCase terms = new HashSet<>(); - for (Bucket topTerm : topTerms) { - terms.add(topTerm.getKeyAsString()); - } - assertThat(terms, hasSize(6)); - assertThat(terms.contains("jam"), is(true)); - assertThat(terms.contains("council"), is(true)); - assertThat(terms.contains("style"), is(true)); - assertThat(terms.contains("paul"), is(true)); - assertThat(terms.contains("of"), is(true)); - assertThat(terms.contains("the"), is(true)); - - response = client().prepareSearch("test") - .setQuery(new TermQueryBuilder("description", "weller")) - .addAggregation(significantTerms("mySignificantTerms").field("description").executionHint(randomExecutionHint()) - .includeExclude(new IncludeExclude("weller", null))) - .get(); - assertSearchResponse(response); - topTerms = response.getAggregations().get("mySignificantTerms"); - terms = new HashSet<>(); - for (Bucket topTerm : topTerms) { - terms.add(topTerm.getKeyAsString()); - } - assertThat(terms, hasSize(1)); - assertThat(terms.contains("weller"), is(true)); - } - - public void testIncludeExcludeExactValues() throws Exception { - String []incExcTerms={"weller","nosuchterm"}; - SearchResponse response = 
client().prepareSearch("test") - .setQuery(new TermQueryBuilder("description", "weller")) - .addAggregation(significantTerms("mySignificantTerms").field("description").executionHint(randomExecutionHint()) - .includeExclude(new IncludeExclude(null, incExcTerms))) - .get(); - assertSearchResponse(response); - SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms"); - Set terms = new HashSet<>(); - for (Bucket topTerm : topTerms) { - terms.add(topTerm.getKeyAsString()); - } - assertEquals(new HashSet(Arrays.asList("jam", "council", "style", "paul", "of", "the")), terms); - - response = client().prepareSearch("test") - .setQuery(new TermQueryBuilder("description", "weller")) - .addAggregation(significantTerms("mySignificantTerms").field("description").executionHint(randomExecutionHint()) - .includeExclude(new IncludeExclude(incExcTerms, null))) - .get(); - assertSearchResponse(response); - topTerms = response.getAggregations().get("mySignificantTerms"); - terms = new HashSet<>(); - for (Bucket topTerm : topTerms) { - terms.add(topTerm.getKeyAsString()); - } - assertThat(terms, hasSize(1)); - assertThat(terms.contains("weller"), is(true)); - } - - public void testUnmapped() throws Exception { - SearchResponse response = client().prepareSearch("idx_unmapped") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("description", "terje")) - .setFrom(0).setSize(60).setExplain(true) - .addAggregation(significantTerms("mySignificantTerms").field("fact_category").executionHint(randomExecutionHint()) - .minDocCount(2)) - .execute() - .actionGet(); - assertSearchResponse(response); - SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms"); - assertThat(topTerms.getBuckets().size(), equalTo(0)); - } - - public void testTextAnalysis() throws Exception { - SearchResponse response = client().prepareSearch("test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("description", "terje")) - .setFrom(0).setSize(60).setExplain(true) - .addAggregation(significantTerms("mySignificantTerms").field("description").executionHint(randomExecutionHint()) - .minDocCount(2)) - .execute() - .actionGet(); - assertSearchResponse(response); - SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms"); - checkExpectedStringTermsFound(topTerms); - } - - public void testTextAnalysisGND() throws Exception { - SearchResponse response = client().prepareSearch("test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("description", "terje")) - .setFrom(0).setSize(60).setExplain(true) - .addAggregation(significantTerms("mySignificantTerms").field("description").executionHint(randomExecutionHint()).significanceHeuristic(new GND(true)) - .minDocCount(2)) - .execute() - .actionGet(); - assertSearchResponse(response); - SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms"); - checkExpectedStringTermsFound(topTerms); - } - - public void testTextAnalysisChiSquare() throws Exception { - SearchResponse response = client().prepareSearch("test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("description", "terje")) - .setFrom(0).setSize(60).setExplain(true) - .addAggregation(significantTerms("mySignificantTerms").field("description").executionHint(randomExecutionHint()).significanceHeuristic(new ChiSquare(false,true)) - .minDocCount(2)) - .execute() - .actionGet(); - assertSearchResponse(response); - SignificantTerms topTerms = 
response.getAggregations().get("mySignificantTerms"); - checkExpectedStringTermsFound(topTerms); - } - - public void testTextAnalysisPercentageScore() throws Exception { - SearchResponse response = client() - .prepareSearch("test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("description", "terje")) - .setFrom(0) - .setSize(60) - .setExplain(true) - .addAggregation( - significantTerms("mySignificantTerms").field("description").executionHint(randomExecutionHint()) - .significanceHeuristic(new PercentageScore()).minDocCount(2)).execute().actionGet(); - assertSearchResponse(response); - SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms"); - checkExpectedStringTermsFound(topTerms); - } - - public void testBadFilteredAnalysis() throws Exception { - // Deliberately using a bad choice of filter here for the background context in order - // to test robustness. - // We search for the name of a snowboarder but use music-related content (fact_category:1) - // as the background source of term statistics. - SearchResponse response = client().prepareSearch("test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("description", "terje")) - .setFrom(0).setSize(60).setExplain(true) - .addAggregation(significantTerms("mySignificantTerms").field("description") - .minDocCount(2).backgroundFilter(QueryBuilders.termQuery("fact_category", 1))) - .execute() - .actionGet(); - assertSearchResponse(response); - SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms"); - // We expect at least one of the significant terms to have been selected on the basis - // that it is present in the foreground selection but entirely missing from the filtered - // background used as context. - boolean hasMissingBackgroundTerms = false; - for (Bucket topTerm : topTerms) { - if (topTerm.getSupersetDf() == 0) { - hasMissingBackgroundTerms = true; - break; - } - } - assertTrue(hasMissingBackgroundTerms); - } - - public void testFilteredAnalysis() throws Exception { - SearchResponse response = client().prepareSearch("test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("description", "weller")) - .setFrom(0).setSize(60).setExplain(true) - .addAggregation(significantTerms("mySignificantTerms").field("description") - .minDocCount(1).backgroundFilter(QueryBuilders.termsQuery("description", "paul"))) - .execute() - .actionGet(); - assertSearchResponse(response); - SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms"); - HashSet topWords = new HashSet(); - for (Bucket topTerm : topTerms) { - topWords.add(topTerm.getKeyAsString()); - } - //The word "paul" should be a constant of all docs in the background set and therefore not seen as significant - assertFalse(topWords.contains("paul")); - //"Weller" is the only Paul who was in The Jam and therefore this should be identified as a differentiator from the background of all other Pauls. 
- assertTrue(topWords.contains("jam")); - } - - public void testNestedAggs() throws Exception { - String[][] expectedKeywordsByCategory={ - { "paul", "weller", "jam", "style", "council" }, - { "paul", "smith" }, - { "craig", "kelly", "terje", "haakonsen", "burton" }}; - SearchResponse response = client().prepareSearch("test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .addAggregation(terms("myCategories").field("fact_category").minDocCount(2) - .subAggregation( - significantTerms("mySignificantTerms").field("description") - .executionHint(randomExecutionHint()) - .minDocCount(2))) - .execute() - .actionGet(); - assertSearchResponse(response); - Terms topCategoryTerms = response.getAggregations().get("myCategories"); - for (org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket topCategory : topCategoryTerms.getBuckets()) { - SignificantTerms topTerms = topCategory.getAggregations().get("mySignificantTerms"); - HashSet foundTopWords = new HashSet(); - for (Bucket topTerm : topTerms) { - foundTopWords.add(topTerm.getKeyAsString()); - } - String[] expectedKeywords = expectedKeywordsByCategory[Integer.parseInt(topCategory.getKeyAsString()) - 1]; - for (String expectedKeyword : expectedKeywords) { - assertTrue(expectedKeyword + " missing from category keywords", foundTopWords.contains(expectedKeyword)); - } - } - } - - public void testPartiallyUnmapped() throws Exception { - SearchResponse response = client().prepareSearch("idx_unmapped", "test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("description", "terje")) - .setFrom(0).setSize(60).setExplain(true) - .addAggregation(significantTerms("mySignificantTerms").field("description") - .executionHint(randomExecutionHint()) - .minDocCount(2)) - .execute() - .actionGet(); - assertSearchResponse(response); - SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms"); - checkExpectedStringTermsFound(topTerms); - } - - public void testPartiallyUnmappedWithFormat() throws Exception { - SearchResponse response = client().prepareSearch("idx_unmapped", "test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(boolQuery().should(termQuery("description", "the")).should(termQuery("description", "terje"))) - .setFrom(0).setSize(60).setExplain(true) - .addAggregation(significantTerms("mySignificantTerms") - .field("fact_category") - .executionHint(randomExecutionHint()) - .minDocCount(1) - .format("0000")) - .execute() - .actionGet(); - assertSearchResponse(response); - SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms"); - for (int i = 1; i <= 3; i++) { - String key = String.format(Locale.ROOT, "%04d", i); - SignificantTerms.Bucket bucket = topTerms.getBucketByKey(key); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(key)); - } - } - - private void checkExpectedStringTermsFound(SignificantTerms topTerms) { - HashMaptopWords=new HashMap<>(); - for (Bucket topTerm : topTerms ){ - topWords.put(topTerm.getKeyAsString(), topTerm); - } - assertTrue( topWords.containsKey("haakonsen")); - assertTrue( topWords.containsKey("craig")); - assertTrue( topWords.containsKey("kelly")); - assertTrue( topWords.containsKey("burton")); - assertTrue( topWords.containsKey("snowboards")); - Bucket kellyTerm=topWords.get("kelly"); - assertEquals(3, kellyTerm.getSubsetDf()); - assertEquals(4, kellyTerm.getSupersetDf()); - } - - public void testDefaultSignificanceHeuristic() throws Exception { - SearchResponse response = 
client().prepareSearch("test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("description", "terje")) - .setFrom(0).setSize(60).setExplain(true) - .addAggregation(significantTerms("mySignificantTerms") - .field("description") - .executionHint(randomExecutionHint()) - .significanceHeuristic(new JLHScore()) - .minDocCount(2)) - .execute() - .actionGet(); - assertSearchResponse(response); - SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms"); - checkExpectedStringTermsFound(topTerms); - } - - public void testMutualInformation() throws Exception { - SearchResponse response = client().prepareSearch("test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("description", "terje")) - .setFrom(0).setSize(60).setExplain(true) - .addAggregation(significantTerms("mySignificantTerms") - .field("description") - .executionHint(randomExecutionHint()) - .significanceHeuristic(new MutualInformation(false, true)) - .minDocCount(1)) - .execute() - .actionGet(); - assertSearchResponse(response); - SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms"); - checkExpectedStringTermsFound(topTerms); - } - - public void testFailIfFieldNotIndexed() { - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, - () -> client().prepareSearch("test_not_indexed").addAggregation( - significantTerms("mySignificantTerms").field("my_keyword")).get()); - assertThat(e.toString(), - containsString("Cannot search on field [my_keyword] since it is not indexed.")); - - e = expectThrows(SearchPhaseExecutionException.class, - () -> client().prepareSearch("test_not_indexed").addAggregation( - significantTerms("mySignificantTerms").field("my_long")).get()); - assertThat(e.toString(), - containsString("Cannot search on field [my_long] since it is not indexed.")); - } -} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java index 2dc208d89fb..9c6615f8ff9 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java @@ -135,7 +135,7 @@ public class SignificanceHeuristicTests extends ESTestCase { } } - SignificanceHeuristic getRandomSignificanceheuristic() { + public static SignificanceHeuristic getRandomSignificanceheuristic() { List heuristics = new ArrayList<>(); heuristics.add(new JLHScore()); heuristics.add(new MutualInformation(randomBoolean(), randomBoolean())); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorTests.java index e2625039df5..20b2894b73e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorTests.java @@ -19,23 +19,43 @@ package org.elasticsearch.search.aggregations.bucket.significant; +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StoredField; 
+import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MultiReader; +import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.analysis.AnalyzerScope; +import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; +import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregatorFactory.ExecutionMode; +import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.support.ValueType; import org.hamcrest.Matchers; import org.junit.Before; import java.io.IOException; +import java.util.List; public class SignificantTermsAggregatorTests extends AggregatorTestCase { @@ -72,4 +92,199 @@ public class SignificantTermsAggregatorTests extends AggregatorTestCase { assertEquals(1, ((BooleanQuery) parsedQuery).getMinimumNumberShouldMatch()); } + /** + * Uses the significant terms aggregation to find the keywords in text fields + */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/25429") + public void testSignificance() throws IOException { + TextFieldType textFieldType = new TextFieldType(); + textFieldType.setName("text"); + textFieldType.setFielddata(true); + textFieldType.setIndexAnalyzer(new NamedAnalyzer("my_analyzer", AnalyzerScope.GLOBAL, new StandardAnalyzer())); + + IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); + try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, indexWriterConfig)) { + addMixedTextDocs(textFieldType, w); + + SignificantTermsAggregationBuilder sigAgg = new SignificantTermsAggregationBuilder("sig_text", null).field("text"); + sigAgg.executionHint(randomExecutionHint()); + if (randomBoolean()) { + // Use a background filter which just happens to be same scope as whole-index. 
+ sigAgg.backgroundFilter(QueryBuilders.termsQuery("text", "common")); + } + + SignificantTermsAggregationBuilder sigNumAgg = new SignificantTermsAggregationBuilder("sig_number", null).field("long_field"); + sigNumAgg.executionHint(randomExecutionHint()); + + try (IndexReader reader = DirectoryReader.open(w)) { + IndexSearcher searcher = new IndexSearcher(reader); + + // Search "odd" + SignificantTerms terms = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), sigAgg, textFieldType); + + assertEquals(1, terms.getBuckets().size()); + assertNull(terms.getBucketByKey("even")); + assertNull(terms.getBucketByKey("common")); + assertNotNull(terms.getBucketByKey("odd")); + + // Search even + terms = searchAndReduce(searcher, new TermQuery(new Term("text", "even")), sigAgg, textFieldType); + + assertEquals(1, terms.getBuckets().size()); + assertNull(terms.getBucketByKey("odd")); + assertNull(terms.getBucketByKey("common")); + assertNotNull(terms.getBucketByKey("even")); + + // Search odd with regex includeexcludes + sigAgg.includeExclude(new IncludeExclude("o.d", null)); + terms = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), sigAgg, textFieldType); + assertEquals(1, terms.getBuckets().size()); + assertNotNull(terms.getBucketByKey("odd")); + assertNull(terms.getBucketByKey("common")); + assertNull(terms.getBucketByKey("even")); + + // Search with string-based includeexcludes + String oddStrings[] = new String[] {"odd", "weird"}; + String evenStrings[] = new String[] {"even", "regular"}; + + sigAgg.includeExclude(new IncludeExclude(oddStrings, evenStrings)); + sigAgg.significanceHeuristic(SignificanceHeuristicTests.getRandomSignificanceheuristic()); + terms = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), sigAgg, textFieldType); + assertEquals(1, terms.getBuckets().size()); + assertNotNull(terms.getBucketByKey("odd")); + assertNull(terms.getBucketByKey("weird")); + assertNull(terms.getBucketByKey("common")); + assertNull(terms.getBucketByKey("even")); + assertNull(terms.getBucketByKey("regular")); + + sigAgg.includeExclude(new IncludeExclude(evenStrings, oddStrings)); + terms = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), sigAgg, textFieldType); + assertEquals(0, terms.getBuckets().size()); + assertNull(terms.getBucketByKey("odd")); + assertNull(terms.getBucketByKey("weird")); + assertNull(terms.getBucketByKey("common")); + assertNull(terms.getBucketByKey("even")); + assertNull(terms.getBucketByKey("regular")); + + } + } + } + + /** + * Uses the significant terms aggregation to find the keywords in numeric + * fields + */ + public void testNumericSignificance() throws IOException { + NumberFieldType longFieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + longFieldType.setName("long_field"); + + TextFieldType textFieldType = new TextFieldType(); + textFieldType.setName("text"); + textFieldType.setIndexAnalyzer(new NamedAnalyzer("my_analyzer", AnalyzerScope.GLOBAL, new StandardAnalyzer())); + + IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); + final long ODD_VALUE = 3; + final long EVEN_VALUE = 6; + final long COMMON_VALUE = 2; + + try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, indexWriterConfig)) { + + for (int i = 0; i < 10; i++) { + Document doc = new Document(); + if (i % 2 == 0) { + addFields(doc, NumberType.LONG.createFields("long_field", ODD_VALUE, true, true, false)); + doc.add(new Field("text", "odd", textFieldType)); + } else { + 
addFields(doc, NumberType.LONG.createFields("long_field", EVEN_VALUE, true, true, false)); + doc.add(new Field("text", "even", textFieldType)); + } + addFields(doc, NumberType.LONG.createFields("long_field", COMMON_VALUE, true, true, false)); + w.addDocument(doc); + } + + SignificantTermsAggregationBuilder sigNumAgg = new SignificantTermsAggregationBuilder("sig_number", null).field("long_field"); + sigNumAgg.executionHint(randomExecutionHint()); + + try (IndexReader reader = DirectoryReader.open(w)) { + IndexSearcher searcher = new IndexSearcher(reader); + + // Search "odd" + SignificantLongTerms terms = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), sigNumAgg, longFieldType); + assertEquals(1, terms.getBuckets().size()); + + assertNull(terms.getBucketByKey(Long.toString(EVEN_VALUE))); + assertNull(terms.getBucketByKey(Long.toString(COMMON_VALUE))); + assertNotNull(terms.getBucketByKey(Long.toString(ODD_VALUE))); + + terms = searchAndReduce(searcher, new TermQuery(new Term("text", "even")), sigNumAgg, longFieldType); + assertEquals(1, terms.getBuckets().size()); + + assertNull(terms.getBucketByKey(Long.toString(ODD_VALUE))); + assertNull(terms.getBucketByKey(Long.toString(COMMON_VALUE))); + assertNotNull(terms.getBucketByKey(Long.toString(EVEN_VALUE))); + + } + } + } + + /** + * Uses the significant terms aggregation on an index with unmapped field + */ + public void testUnmapped() throws IOException { + TextFieldType textFieldType = new TextFieldType(); + textFieldType.setName("text"); + textFieldType.setFielddata(true); + textFieldType.setIndexAnalyzer(new NamedAnalyzer("my_analyzer", AnalyzerScope.GLOBAL, new StandardAnalyzer())); + + IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); + try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, indexWriterConfig)) { + addMixedTextDocs(textFieldType, w); + + // Attempt aggregation on unmapped field + SignificantTermsAggregationBuilder sigAgg = new SignificantTermsAggregationBuilder("sig_text", null).field("unmapped_field"); + sigAgg.executionHint(randomExecutionHint()); + + try (IndexReader reader = DirectoryReader.open(w)) { + IndexSearcher searcher = new IndexSearcher(reader); + + // Search "odd" + SignificantTerms terms = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), sigAgg, textFieldType); + assertEquals(0, terms.getBuckets().size()); + + assertNull(terms.getBucketByKey("even")); + assertNull(terms.getBucketByKey("common")); + assertNull(terms.getBucketByKey("odd")); + + } + } + } + + private void addMixedTextDocs(TextFieldType textFieldType, IndexWriter w) throws IOException { + for (int i = 0; i < 10; i++) { + Document doc = new Document(); + StringBuilder text = new StringBuilder("common "); + if (i % 2 == 0) { + text.append("odd "); + } else { + text.append("even "); + } + + doc.add(new Field("text", text.toString(), textFieldType)); + String json = "{ \"text\" : \"" + text.toString() + "\" }"; + doc.add(new StoredField("_source", new BytesRef(json))); + + w.addDocument(doc); + } + } + + private void addFields(Document doc, List createFields) { + for (Field field : createFields) { + doc.add(field); + } + } + + public String randomExecutionHint() { + return randomBoolean() ? 
null : randomFrom(ExecutionMode.values()).toString(); + } + } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorTests.java index 8376d8c57a1..1057d3a71e0 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorTests.java @@ -65,10 +65,6 @@ public class SignificantTextAggregatorTests extends AggregatorTestCase { textFieldType.setIndexAnalyzer(new NamedAnalyzer("my_analyzer", AnalyzerScope.GLOBAL, new StandardAnalyzer())); IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); - indexWriterConfig.setMaxBufferedDocs(100); - indexWriterConfig.setRAMBufferSizeMB(100); // flush on open to have a - // single segment with - // predictable docIds try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, indexWriterConfig)) { for (int i = 0; i < 10; i++) { Document doc = new Document(); @@ -95,7 +91,6 @@ public class SignificantTextAggregatorTests extends AggregatorTestCase { .subAggregation(sigAgg); try (IndexReader reader = DirectoryReader.open(w)) { - assertEquals("test expects a single segment", 1, reader.leaves().size()); IndexSearcher searcher = new IndexSearcher(reader); // Search "odd" which should have no duplication @@ -145,7 +140,6 @@ public class SignificantTextAggregatorTests extends AggregatorTestCase { SignificantTextAggregationBuilder sigAgg = new SignificantTextAggregationBuilder("sig_text", "text"); sigAgg.sourceFieldNames(Arrays.asList(new String [] {"title", "text"})); try (IndexReader reader = DirectoryReader.open(w)) { - assertEquals("test expects a single segment", 1, reader.leaves().size()); IndexSearcher searcher = new IndexSearcher(reader); searchAndReduce(searcher, new TermQuery(new Term("text", "foo")), sigAgg, textFieldType); // No significant results to be found in this test - only checking we don't end up diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java index ec91db0c8fc..51d54e5dcb1 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java @@ -23,8 +23,10 @@ import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.ObjectIntMap; import com.carrotsearch.hppc.ObjectObjectHashMap; import com.carrotsearch.hppc.ObjectObjectMap; + import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; @@ -32,7 +34,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; @@ -189,7 +190,7 @@ public abstract class 
AbstractGeoTestCase extends ESIntegTestCase { SearchHit searchHit = response.getHits().getAt(i); assertThat("Hit " + i + " with id: " + searchHit.getId(), searchHit.getIndex(), equalTo("high_card_idx")); assertThat("Hit " + i + " with id: " + searchHit.getId(), searchHit.getType(), equalTo("type")); - SearchHitField hitField = searchHit.field(NUMBER_FIELD_NAME); + DocumentField hitField = searchHit.field(NUMBER_FIELD_NAME); assertThat("Hit " + i + " has wrong number of values", hitField.getValues().size(), equalTo(1)); Long value = hitField.getValue(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index a2ebb378fc3..4fc4ec9ac60 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; @@ -40,6 +39,7 @@ import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetric; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.junit.Before; import java.io.IOException; import java.nio.file.Files; @@ -253,14 +253,16 @@ public class ScriptedMetricIT extends ESIntegTestCase { ensureSearchable(); } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - Path config = createTempDir().resolve("config"); - Path scripts = config.resolve("scripts"); + private Path config; + + @Before + public void setUp() throws Exception { + super.setUp(); + config = createTempDir().resolve("config"); + final Path scripts = config.resolve("scripts"); try { Files.createDirectories(scripts); - // When using the MockScriptPlugin we can map File scripts to inline scripts: // the name of the file script is used in test method while the source of the file script // must match a predefined script from CustomScriptPlugin.pluginScripts() method @@ -271,11 +273,11 @@ public class ScriptedMetricIT extends ESIntegTestCase { } catch (IOException e) { throw new RuntimeException("failed to create scripts"); } + } - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(Environment.PATH_CONF_SETTING.getKey(), config) - .build(); + @Override + protected Path nodeConfigPath(int nodeOrdinal) { + return config; } public void testMap() { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index a90960c2ec9..2287d2ba986 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import 
org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -36,9 +37,9 @@ import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -47,7 +48,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.elasticsearch.search.aggregations.metrics.max.Max; import org.elasticsearch.search.aggregations.metrics.tophits.TopHits; -import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.rescore.RescoreBuilder; @@ -615,7 +615,7 @@ public class TopHitsIT extends ESIntegTestCase { assertThat(hit.getMatchedQueries()[0], equalTo("test")); - SearchHitField field = hit.field("field1"); + DocumentField field = hit.field("field1"); assertThat(field.getValue().toString(), equalTo("5")); assertThat(hit.getSourceAsMap().get("text").toString(), equalTo("some text to entertain")); @@ -893,7 +893,7 @@ public class TopHitsIT extends ESIntegTestCase { assertThat(searchHit.getMatchedQueries(), arrayContaining("test")); - SearchHitField field = searchHit.field("comments.user"); + DocumentField field = searchHit.field("comments.user"); assertThat(field.getValue().toString(), equalTo("a")); field = searchHit.field("script"); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesTestCase.java index e7d808a9b3d..e54a2a8b9a1 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesTestCase.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.metrics.percentiles; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.Aggregation.CommonFields; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; @@ -29,6 +30,7 @@ import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.function.Predicate; public abstract class AbstractPercentilesTestCase> extends InternalAggregationTestCase { @@ -62,7 +64,7 @@ public abstract class AbstractPercentilesTestCase parsedAggregation = parseAndAssert(aggregation, false); + final Iterable parsedAggregation = parseAndAssert(aggregation, false, false); Iterator it = aggregation.iterator(); Iterator parsedIt = 
parsedAggregation.iterator(); @@ -82,4 +84,9 @@ public abstract class AbstractPercentilesTestCase excludePathsFromXContentInsertion() { + return path -> path.endsWith(CommonFields.VALUES.getPreferredName()); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java index 8e06926ea05..be105f2af80 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentilesTestCase.java @@ -21,9 +21,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.test.InternalAggregationTestCase; -import org.junit.Before; import java.util.List; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesRanksTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesRanksTests.java index 728ddf6afa8..d9379edefef 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesRanksTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentilesRanksTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentilesRanksTestCase; import org.elasticsearch.search.aggregations.metrics.percentiles.ParsedPercentiles; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.test.InternalAggregationTestCase; import java.util.Arrays; import java.util.List; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java index 78df637e068..3e524b17721 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java @@ -24,11 +24,11 @@ import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.Aggregation.CommonFields; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; @@ -39,6 +39,7 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.function.Predicate; import java.util.function.Supplier; public class InternalScriptedMetricTests extends 
InternalAggregationTestCase { @@ -185,4 +186,9 @@ public class InternalScriptedMetricTests extends InternalAggregationTestCase excludePathsFromXContentInsertion() { + return path -> path.contains(CommonFields.VALUE.getPreferredName()); + } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java index af4e0bac3ec..cfec0d6aaee 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java @@ -28,10 +28,10 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.text.Text; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -85,7 +85,7 @@ public class InternalTopHitsTests extends InternalAggregationTestCase between(0, IndexWriter.MAX_DOCS)); usedDocIds.add(docId); - Map searchHitFields = new HashMap<>(); + Map searchHitFields = new HashMap<>(); if (testInstancesLookSortedByField) { Object[] fields = new Object[testInstancesSortFields.length]; for (int f = 0; f < testInstancesSortFields.length; f++) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucketTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucketTests.java index ed4fc007761..0adbafb843f 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucketTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucketTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.Aggregation.CommonFields; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -31,6 +32,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.function.Predicate; import static org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentilesTestCase.randomPercents; @@ -110,11 +112,16 @@ public class InternalPercentilesBucketTests extends InternalAggregationTestCase< public void testParsedAggregationIteratorOrder() throws IOException { final InternalPercentilesBucket aggregation = createTestInstance(); - final Iterable parsedAggregation = parseAndAssert(aggregation, false); + final Iterable parsedAggregation = parseAndAssert(aggregation, false, false); Iterator it = aggregation.iterator(); Iterator parsedIt = parsedAggregation.iterator(); while (it.hasNext()) 
{ assertEquals(it.next(), parsedIt.next()); } } + + @Override + protected Predicate excludePathsFromXContentInsertion() { + return path -> path.endsWith(CommonFields.VALUES.getPreferredName()); + } } diff --git a/core/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java b/core/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java index ed70aa119f6..92488a69d6d 100644 --- a/core/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java @@ -126,8 +126,10 @@ public class TransportTwoNodesSearchIT extends ESIntegTestCase { // to produce the same 8-bit norm for all docs here, so that // the tf is basically the entire score (assuming idf is fixed, which // it should be if dfs is working correctly) - for (int i = 1024; i < 1124; i++) { - index(Integer.toString(i - 1024), "test", i); + // With the current way of encoding norms, every length between 1048 and 1176 + // is encoded into the same byte + for (int i = 1048; i < 1148; i++) { + index(Integer.toString(i - 1048), "test", i); } refresh(); diff --git a/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java index d3a8946571b..cd15c966834 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java +++ b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.child; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.cluster.ClusterState; @@ -30,9 +31,9 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -60,7 +61,7 @@ public class ParentFieldLoadingIT extends ESIntegTestCase { .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1) // We never want merges in this test to ensure we have two segments for the last validation .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) - .put("index.mapping.single_type", false) + .put("index.version.created", Version.V_5_6_0) .build(); public void testEagerParentFieldLoading() throws Exception { @@ -132,25 +133,22 @@ public class ParentFieldLoadingIT extends ESIntegTestCase { .get(); assertAcked(putMappingResponse); Index test = resolveIndex("test"); - assertBusy(new Runnable() { - @Override - public void run() { - ClusterState clusterState = internalCluster().clusterService().state(); - ShardRouting shardRouting = clusterState.routingTable().index("test").shard(0).getShards().get(0); - String nodeName = clusterState.getNodes().get(shardRouting.currentNodeId()).getName(); + assertBusy(() -> { + ClusterState clusterState = internalCluster().clusterService().state(); + ShardRouting shardRouting = clusterState.routingTable().index("test").shard(0).getShards().get(0); + String nodeName =
clusterState.getNodes().get(shardRouting.currentNodeId()).getName(); - boolean verified = false; - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName); - IndexService indexService = indicesService.indexService(test); - if (indexService != null) { - MapperService mapperService = indexService.mapperService(); - DocumentMapper documentMapper = mapperService.documentMapper("child"); - if (documentMapper != null) { - verified = documentMapper.parentFieldMapper().fieldType().eagerGlobalOrdinals(); - } + boolean verified = false; + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName); + IndexService indexService = indicesService.indexService(test); + if (indexService != null) { + MapperService mapperService = indexService.mapperService(); + DocumentMapper documentMapper = mapperService.documentMapper("child"); + if (documentMapper != null) { + verified = documentMapper.parentFieldMapper().fieldType().eagerGlobalOrdinals(); } - assertTrue(verified); } + assertTrue(verified); }); // Need to add a new doc otherwise the refresh doesn't trigger a new searcher diff --git a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java index 6c1d0877f7b..0912236e018 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsResponse; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLoggerFactory; @@ -36,7 +37,6 @@ import org.elasticsearch.index.termvectors.TermVectorsService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.SearchExtBuilder; -import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESIntegTestCase; @@ -129,9 +129,9 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase { if (hitContext.hit().fieldsOrNull() == null) { hitContext.hit().fields(new HashMap<>()); } - SearchHitField hitField = hitContext.hit().getFields().get(NAME); + DocumentField hitField = hitContext.hit().getFields().get(NAME); if (hitField == null) { - hitField = new SearchHitField(NAME, new ArrayList<>(1)); + hitField = new DocumentField(NAME, new ArrayList<>(1)); hitContext.hit().getFields().put(NAME, hitField); } TermVectorsRequest termVectorsRequest = new TermVectorsRequest(context.indexShard().shardId().getIndex().getName(), diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java index 8f092383a5b..3742166a61b 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.fetch.subphase; import org.apache.lucene.search.join.ScoreMode; import 
org.apache.lucene.util.ArrayUtil; +import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -40,8 +41,10 @@ import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -72,7 +75,7 @@ public class InnerHitsIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singleton(CustomScriptPlugin.class); + return Arrays.asList(InternalSettingsPlugin.class, CustomScriptPlugin.class); } public static class CustomScriptPlugin extends MockScriptPlugin { @@ -591,7 +594,7 @@ public class InnerHitsIT extends ESIntegTestCase { public void testInnerHitsWithIgnoreUnmapped() throws Exception { assertAcked(prepareCreate("index1") - .setSettings("index.mapping.single_type", false) + .setSettings("index.version.created", Version.V_5_6_0.id) .addMapping("parent_type", "nested_type", "type=nested") .addMapping("child_type", "_parent", "type=parent_type") ); diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 9cbd9fc5d75..acaf35429be 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,8 +19,8 @@ package org.elasticsearch.search.fetch.subphase.highlight; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -214,54 +214,6 @@ public class HighlighterSearchIT extends ESIntegTestCase { assertHighlight(search, 0, "name", 0, startsWith("abc abc abc abc")); } - public void testNgramHighlighting() throws IOException { - assertAcked(prepareCreate("test") - .addMapping("test", - "name", "type=text,analyzer=name_index_analyzer,search_analyzer=name_search_analyzer," - + "term_vector=with_positions_offsets", - "name2", "type=text,analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer," - + "term_vector=with_positions_offsets") - .setSettings(Settings.builder() - .put(indexSettings()) - .put("analysis.filter.my_ngram.max_gram", 20) - .put("analysis.filter.my_ngram.min_gram", 1) - .put("analysis.filter.my_ngram.type", "ngram") - .put("analysis.tokenizer.my_ngramt.max_gram", 20) - .put("analysis.tokenizer.my_ngramt.min_gram", 1) - .put("analysis.tokenizer.my_ngramt.token_chars", "letter,digit") - .put("analysis.tokenizer.my_ngramt.type", "ngram") - .put("analysis.analyzer.name_index_analyzer.tokenizer", "my_ngramt") - .put("analysis.analyzer.name2_index_analyzer.tokenizer", "whitespace") - .put("analysis.analyzer.name2_index_analyzer.filter", "my_ngram") - .put("analysis.analyzer.name_search_analyzer.tokenizer", "whitespace"))); - client().prepareIndex("test", "test", "1") - 
.setSource("name", "logicacmg ehemals avinci - the know how company", - "name2", "logicacmg ehemals avinci - the know how company").get(); - refresh(); - ensureGreen(); - SearchResponse search = client().prepareSearch().setQuery(matchQuery("name", "logica m")) - .highlighter(new HighlightBuilder().field("name")).get(); - assertHighlight(search, 0, "name", 0, - equalTo("logicacmg ehemals avinci - the know how company")); - - search = client().prepareSearch().setQuery(matchQuery("name", "logica ma")).highlighter(new HighlightBuilder().field("name")).get(); - assertHighlight(search, 0, "name", 0, equalTo("logicacmg ehemals avinci - the know how company")); - - search = client().prepareSearch().setQuery(matchQuery("name", "logica")).highlighter(new HighlightBuilder().field("name")).get(); - assertHighlight(search, 0, "name", 0, equalTo("logicacmg ehemals avinci - the know how company")); - - search = client().prepareSearch().setQuery(matchQuery("name2", "logica m")).highlighter(new HighlightBuilder().field("name2")) - .get(); - assertHighlight(search, 0, "name2", 0, equalTo("logicacmg ehemals avinci - the know how company")); - - search = client().prepareSearch().setQuery(matchQuery("name2", "logica ma")).highlighter(new HighlightBuilder().field("name2")) - .get(); - assertHighlight(search, 0, "name2", 0, equalTo("logicacmg ehemals avinci - the know how company")); - - search = client().prepareSearch().setQuery(matchQuery("name2", "logica")).highlighter(new HighlightBuilder().field("name2")).get(); - assertHighlight(search, 0, "name2", 0, equalTo("logicacmg ehemals avinci - the know how company")); - } - public void testEnsureNoNegativeOffsets() throws Exception { assertAcked(prepareCreate("test") .addMapping("type1", @@ -1407,29 +1359,27 @@ public class HighlighterSearchIT extends ESIntegTestCase { public void testPhrasePrefix() throws IOException { Builder builder = Settings.builder() .put(indexSettings()) - .put("index.mapping.single_type", false) .put("index.analysis.analyzer.synonym.tokenizer", "whitespace") .putArray("index.analysis.analyzer.synonym.filter", "synonym", "lowercase") .put("index.analysis.filter.synonym.type", "synonym") .putArray("index.analysis.filter.synonym.synonyms", "quick => fast"); - assertAcked(prepareCreate("test").setSettings(builder.build()).addMapping("type1", type1TermVectorMapping()) - .addMapping("type2", - "field4", "type=text,term_vector=with_positions_offsets,analyzer=synonym", - "field3", "type=text,analyzer=synonym")); + assertAcked(prepareCreate("first_test_index").setSettings(builder.build()).addMapping("type1", type1TermVectorMapping())); + ensureGreen(); - client().prepareIndex("test", "type1", "0").setSource( + client().prepareIndex("first_test_index", "type1", "0").setSource( "field0", "The quick brown fox jumps over the lazy dog", "field1", "The quick brown fox jumps over the lazy dog").get(); - client().prepareIndex("test", "type1", "1").setSource("field1", "The quick browse button is a fancy thing, right bro?").get(); + client().prepareIndex("first_test_index", "type1", "1").setSource("field1", + "The quick browse button is a fancy thing, right bro?").get(); refresh(); logger.info("--> highlighting and searching on field0"); SearchSourceBuilder source = searchSource() .query(matchPhrasePrefixQuery("field0", "bro")) .highlighter(highlight().field("field0").order("score").preTags("").postTags("")); - SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + SearchResponse searchResponse = 
client().search(searchRequest("first_test_index").source(source)).actionGet(); assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); @@ -1437,7 +1387,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { .query(matchPhrasePrefixQuery("field0", "quick bro")) .highlighter(highlight().field("field0").order("score").preTags("").postTags("")); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + searchResponse = client().search(searchRequest("first_test_index").source(source)).actionGet(); assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); logger.info("--> highlighting and searching on field1"); @@ -1448,7 +1398,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { ) .highlighter(highlight().field("field1").order("score").preTags("").postTags("")); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + searchResponse = client().search(searchRequest("first_test_index").source(source)).actionGet(); assertThat(searchResponse.getHits().totalHits, equalTo(2L)); for (int i = 0; i < 2; i++) { assertHighlight(searchResponse, i, "field1", 0, 1, anyOf( @@ -1460,7 +1410,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { .query(matchPhrasePrefixQuery("field1", "quick bro")) .highlighter(highlight().field("field1").order("score").preTags("").postTags("")); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + searchResponse = client().search(searchRequest("first_test_index").source(source)).actionGet(); assertHighlight(searchResponse, 0, "field1", 0, 1, anyOf( equalTo("The quick browse button is a fancy thing, right bro?"), @@ -1469,27 +1419,33 @@ public class HighlighterSearchIT extends ESIntegTestCase { equalTo("The quick browse button is a fancy thing, right bro?"), equalTo("The quick brown fox jumps over the lazy dog"))); + assertAcked(prepareCreate("second_test_index").setSettings(builder.build()).addMapping("doc", + "field4", "type=text,term_vector=with_positions_offsets,analyzer=synonym", + "field3", "type=text,analyzer=synonym")); // with synonyms - client().prepareIndex("test", "type2", "0").setSource( + client().prepareIndex("second_test_index", "doc", "0").setSource( + "type", "type2", "field4", "The quick brown fox jumps over the lazy dog", "field3", "The quick brown fox jumps over the lazy dog").get(); - client().prepareIndex("test", "type2", "1").setSource( + client().prepareIndex("second_test_index", "doc", "1").setSource( + "type", "type2", "field4", "The quick browse button is a fancy thing, right bro?").get(); - client().prepareIndex("test", "type2", "2").setSource( + client().prepareIndex("second_test_index", "doc", "2").setSource( + "type", "type2", "field4", "a quick fast blue car").get(); refresh(); - source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field3", "fast bro")) + source = searchSource().postFilter(termQuery("type", "type2")).query(matchPhrasePrefixQuery("field3", "fast bro")) .highlighter(highlight().field("field3").order("score").preTags("").postTags("")); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + searchResponse = client().search(searchRequest("second_test_index").source(source)).actionGet(); assertHighlight(searchResponse, 0, "field3", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); logger.info("--> highlighting and searching on 
field4"); - source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field4", "the fast bro")) + source = searchSource().postFilter(termQuery("type", "type2")).query(matchPhrasePrefixQuery("field4", "the fast bro")) .highlighter(highlight().field("field4").order("score").preTags("").postTags("")); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + searchResponse = client().search(searchRequest("second_test_index").source(source)).actionGet(); assertHighlight(searchResponse, 0, "field4", 0, 1, anyOf( equalTo("The quick browse button is a fancy thing, right bro?"), @@ -1499,9 +1455,9 @@ public class HighlighterSearchIT extends ESIntegTestCase { equalTo("The quick brown fox jumps over the lazy dog"))); logger.info("--> highlighting and searching on field4"); - source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field4", "a fast quick blue ca")) + source = searchSource().postFilter(termQuery("type", "type2")).query(matchPhrasePrefixQuery("field4", "a fast quick blue ca")) .highlighter(highlight().field("field4").order("score").preTags("").postTags("")); - searchResponse = client().search(searchRequest("test").source(source)).actionGet(); + searchResponse = client().search(searchRequest("second_test_index").source(source)).actionGet(); assertHighlight(searchResponse, 0, "field4", 0, 1, anyOf(equalTo("a quick fast blue car"), diff --git a/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index b33196cf005..5ae26875858 100644 --- a/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -19,14 +19,15 @@ package org.elasticsearch.search.fields; +import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.joda.Joda; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -38,11 +39,11 @@ import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.lookup.FieldLookup; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.ReadableDateTime; @@ -81,7 +82,7 @@ public class SearchFieldsIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singletonList(CustomScriptPlugin.class); + return Arrays.asList(InternalSettingsPlugin.class, CustomScriptPlugin.class); } public static class CustomScriptPlugin extends MockScriptPlugin { @@ -485,7 +486,7 @@ public class SearchFieldsIT extends 
ESIntegTestCase { assertNoFailures(response); - SearchHitField fieldObj = response.getHits().getAt(0).field("test_script_1"); + DocumentField fieldObj = response.getHits().getAt(0).field("test_script_1"); assertThat(fieldObj, notNullValue()); List fieldValues = fieldObj.getValues(); assertThat(fieldValues, hasSize(1)); @@ -640,10 +641,9 @@ public class SearchFieldsIT extends ESIntegTestCase { public void testGetFieldsComplexField() throws Exception { client().admin().indices().prepareCreate("my-index") .setSettings("index.refresh_interval", -1) - .setSettings("index.mapping.single_type", false) - .addMapping("my-type2", jsonBuilder() + .addMapping("doc", jsonBuilder() .startObject() - .startObject("my-type2") + .startObject("doc") .startObject("properties") .startObject("field1") .field("type", "object") @@ -692,19 +692,12 @@ public class SearchFieldsIT extends ESIntegTestCase { .endArray() .endObject().bytes(); - client().prepareIndex("my-index", "my-type1", "1").setSource(source, XContentType.JSON).get(); - client().prepareIndex("my-index", "my-type2", "1").setRefreshPolicy(IMMEDIATE).setSource(source, XContentType.JSON).get(); + client().prepareIndex("my-index", "doc", "1").setRefreshPolicy(IMMEDIATE).setSource(source, XContentType.JSON).get(); String field = "field1.field2.field3.field4"; - SearchResponse searchResponse = client().prepareSearch("my-index").setTypes("my-type1").addStoredField(field).get(); - assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); - assertThat(searchResponse.getHits().getAt(0).field(field).isMetadataField(), equalTo(false)); - assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2)); - assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2")); - searchResponse = client().prepareSearch("my-index").setTypes("my-type2").addStoredField(field).get(); + SearchResponse searchResponse = client().prepareSearch("my-index").addStoredField(field).get(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); assertThat(searchResponse.getHits().getAt(0).field(field).isMetadataField(), equalTo(false)); assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2)); @@ -721,7 +714,7 @@ public class SearchFieldsIT extends ESIntegTestCase { SearchResponse searchResponse = client().prepareSearch("test").setTypes("type").setSource( new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).fieldDataField("test_field")).get(); assertHitCount(searchResponse, 1); - Map fields = searchResponse.getHits().getHits()[0].getFields(); + Map fields = searchResponse.getHits().getHits()[0].getFields(); assertThat(fields.get("test_field").getValue(), equalTo("foobar")); } @@ -860,7 +853,7 @@ public class SearchFieldsIT extends ESIntegTestCase { assertSearchResponse(resp); for (SearchHit hit : resp.getHits().getHits()) { final int id = Integer.parseInt(hit.getId()); - Map fields = hit.getFields(); + Map fields = hit.getFields(); assertThat(fields.get("s").getValues(), equalTo(Collections. singletonList(Integer.toString(id)))); assertThat(fields.get("l").getValues(), equalTo(Collections. singletonList((long) id))); assertThat(fields.get("d").getValues(), equalTo(Collections. 
singletonList((double) id))); @@ -871,27 +864,21 @@ public class SearchFieldsIT extends ESIntegTestCase { } public void testLoadMetadata() throws Exception { - assertAcked(prepareCreate("test") - .setSettings("index.mapping.single_type", false) - .addMapping("parent") - .addMapping("my-type1", "_parent", "type=parent")); + assertAcked(prepareCreate("test")); indexRandom(true, - client().prepareIndex("test", "my-type1", "1") + client().prepareIndex("test", "doc", "1") .setRouting("1") - .setParent("parent_1") .setSource(jsonBuilder().startObject().field("field1", "value").endObject())); SearchResponse response = client().prepareSearch("test").addStoredField("field1").get(); assertSearchResponse(response); assertHitCount(response, 1); - Map fields = response.getHits().getAt(0).getFields(); + Map fields = response.getHits().getAt(0).getFields(); assertThat(fields.get("field1"), nullValue()); assertThat(fields.get("_routing").isMetadataField(), equalTo(true)); assertThat(fields.get("_routing").getValue().toString(), equalTo("1")); - assertThat(fields.get("_parent").isMetadataField(), equalTo(true)); - assertThat(fields.get("_parent").getValue().toString(), equalTo("parent_1")); } } diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index 5f722a86a23..b43e479031d 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -273,14 +273,14 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { .setId("1") .setIndex("test") .setSource( - jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 21).endObject() - .endObject())); + jsonBuilder().startObject().field("test", "value value").startObject("loc").field("lat", 11).field("lon", 21) + .endObject().endObject())); indexBuilders.add(client().prepareIndex() .setType("type1") .setId("2") .setIndex("test") .setSource( - jsonBuilder().startObject().field("test", "value value").startObject("loc").field("lat", 11).field("lon", 20) + jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 20) .endObject().endObject())); indexRandom(true, false, indexBuilders); // force no dummy docs @@ -297,10 +297,19 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { SearchResponse sr = response.actionGet(); SearchHits sh = sr.getHits(); assertThat(sh.getTotalHits(), equalTo((long) (2))); - assertThat(sh.getAt(0).getId(), isOneOf("1")); + assertThat(sh.getAt(0).getId(), equalTo("1")); assertThat(sh.getAt(1).getId(), equalTo("2")); // Test Exp + response = client().search( + searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( + searchSource().query(termQuery("test", "value")))); + sr = response.actionGet(); + sh = sr.getHits(); + assertThat(sh.getTotalHits(), equalTo((long) (2))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index cfed4c014b3..2bf691e6a36 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ 
b/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -83,7 +83,7 @@ public class ExplainableScriptIT extends ESIntegTestCase { return new MyScript(lookup.doc().getLeafDocLookup(context)); } @Override - public boolean needsScores() { + public boolean needs_score() { return false; } }; diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index 18db8cd539e..2b188adeb70 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -294,8 +294,8 @@ public class QueryRescorerIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getHits().length, equalTo(4)); assertHitCount(searchResponse, 4); assertThat(searchResponse.getHits().getMaxScore(), equalTo(searchResponse.getHits().getHits()[0].getScore())); - assertFirstHit(searchResponse, hasId("6")); - assertSecondHit(searchResponse, hasId("1")); + assertFirstHit(searchResponse, hasId("1")); + assertSecondHit(searchResponse, hasId("6")); assertThirdHit(searchResponse, hasId("3")); assertFourthHit(searchResponse, hasId("2")); } @@ -392,29 +392,6 @@ public class QueryRescorerIT extends ESIntegTestCase { } } - private static void assertEquivalentOrSubstringMatch(String query, SearchResponse plain, SearchResponse rescored) { - assertNoFailures(plain); - assertNoFailures(rescored); - SearchHits leftHits = plain.getHits(); - SearchHits rightHits = rescored.getHits(); - assertThat(leftHits.getTotalHits(), equalTo(rightHits.getTotalHits())); - assertThat(leftHits.getHits().length, equalTo(rightHits.getHits().length)); - SearchHit[] hits = leftHits.getHits(); - SearchHit[] otherHits = rightHits.getHits(); - if (!hits[0].getId().equals(otherHits[0].getId())) { - assertThat(((String) otherHits[0].getSourceAsMap().get("field1")).contains(query), equalTo(true)); - } else { - Arrays.sort(hits, searchHitsComparator); - Arrays.sort(otherHits, searchHitsComparator); - for (int i = 0; i < hits.length; i++) { - if (hits[i].getScore() == hits[hits.length-1].getScore()) { - return; // we need to cut off here since this is the tail of the queue and we might not have fetched enough docs - } - assertThat(query, hits[i].getId(), equalTo(rightHits.getHits()[i].getId())); - } - } - } - // forces QUERY_THEN_FETCH because of https://github.com/elastic/elasticsearch/issues/4829 public void testEquivalence() throws Exception { // no dummy docs since merges can change scores while we run queries. 
@@ -461,18 +438,6 @@ public class QueryRescorerIT extends ESIntegTestCase { .actionGet(); // check equivalence assertEquivalent(query, plain, rescored); - - rescored = client() - .prepareSearch() - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreference("test") // ensure we hit the same shards for tie-breaking - .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR)) - .setFrom(0) - .setSize(resultSize) - .setRescorer(queryRescorer(matchPhraseQuery("field1", intToEnglish).slop(0)) - .setQueryWeight(1.0f).setRescoreQueryWeight(1.0f), 2 * rescoreWindow).execute().actionGet(); - // check equivalence or if the first match differs we check if the phrase is a substring of the top doc - assertEquivalentOrSubstringMatch(intToEnglish, plain, rescored); } } diff --git a/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java index 8d4f2921f27..d504df60b63 100644 --- a/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java +++ b/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.morelikethis; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -32,10 +33,14 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutionException; @@ -58,6 +63,12 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; public class MoreLikeThisIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + public void testSimpleMoreLikeThis() throws Exception { logger.info("Creating index test"); assertAcked(prepareCreate("test").addMapping("type1", @@ -82,14 +93,13 @@ public class MoreLikeThisIT extends ESIntegTestCase { public void testSimpleMoreLikeOnLongField() throws Exception { logger.info("Creating index test"); assertAcked(prepareCreate("test") - .setSettings("index.mapping.single_type", false) .addMapping("type1", "some_long", "type=long")); logger.info("Running Cluster Health"); assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); logger.info("Indexing..."); client().index(indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("some_long", 1367484649580L).endObject())).actionGet(); - client().index(indexRequest("test").type("type2").id("2").source(jsonBuilder().startObject().field("some_long", 0).endObject())).actionGet(); + client().index(indexRequest("test").type("type1").id("2").source(jsonBuilder().startObject().field("some_long", 0).endObject())).actionGet(); client().index(indexRequest("test").type("type1").id("3").source(jsonBuilder().startObject().field("some_long", -666).endObject())).actionGet(); 
client().admin().indices().refresh(refreshRequest()).actionGet(); @@ -360,7 +370,7 @@ public class MoreLikeThisIT extends ESIntegTestCase { logger.info("Creating index test"); int numOfTypes = randomIntBetween(2, 10); CreateIndexRequestBuilder createRequestBuilder = prepareCreate("test") - .setSettings("index.mapping.single_type", false); + .setSettings("index.version.created", Version.V_5_6_0.id); for (int i = 0; i < numOfTypes; i++) { createRequestBuilder.addMapping("type" + i, jsonBuilder().startObject().startObject("type" + i).startObject("properties") .startObject("text").field("type", "text").endObject() diff --git a/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java b/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java index 76de9d56e32..43c6018d8f8 100644 --- a/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java +++ b/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java @@ -22,16 +22,24 @@ package org.elasticsearch.search.profile.query; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.RandomApproximationQuery; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.TestUtil; @@ -45,6 +53,7 @@ import org.junit.BeforeClass; import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Set; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -191,4 +200,76 @@ public class QueryProfilerTests extends ESTestCase { leafCollector.collect(0); assertThat(profileCollector.getTime(), greaterThan(time)); } + + private static class DummyQuery extends Query { + + @Override + public String toString(String field) { + return getClass().getSimpleName(); + } + + @Override + public boolean equals(Object obj) { + return this == obj; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + return new Weight(this) { + @Override + public void extractTerms(Set terms) { + throw new UnsupportedOperationException(); + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException { + final Weight weight = this; + return new ScorerSupplier() { + + 
@Override + public Scorer get(boolean randomAccess) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public long cost() { + return 42; + } + }; + } + }; + } + } + + public void testScorerSupplier() throws IOException { + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()); + w.addDocument(new Document()); + DirectoryReader reader = DirectoryReader.open(w); + w.close(); + IndexSearcher s = newSearcher(reader); + s.setQueryCache(null); + Weight weight = s.createNormalizedWeight(new DummyQuery(), randomBoolean()); + // exception when getting the scorer + expectThrows(UnsupportedOperationException.class, () -> weight.scorer(s.getIndexReader().leaves().get(0))); + // no exception, means scorerSupplier is delegated + weight.scorerSupplier(s.getIndexReader().leaves().get(0)); + reader.close(); + dir.close(); + } } diff --git a/core/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/core/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index b05c6dff04b..016406c6129 100644 --- a/core/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/core/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -217,18 +217,18 @@ public class QueryPhaseTests extends ESTestCase { context.setSize(10); QueryPhase.execute(context, contextSearcher, null); - assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs)); + assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); assertTrue(collected.get()); assertNull(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(0)); - assertThat(context.queryResult().getTotalHits(), equalTo(numDocs)); + assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs)); QueryPhase.execute(context, contextSearcher, null); - assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs)); + assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(10)); - assertThat(context.queryResult().getTotalHits(), equalTo(numDocs)); + assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().scoreDocs[0].doc, greaterThanOrEqualTo(10)); reader.close(); dir.close(); @@ -270,14 +270,14 @@ public class QueryPhaseTests extends ESTestCase { QueryPhase.execute(context, contextSearcher, null); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); context.setSize(0); QueryPhase.execute(context, contextSearcher, null); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); } @@ -286,7 +286,7 @@ public class QueryPhaseTests extends ESTestCase { QueryPhase.execute(context, contextSearcher, null); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().totalHits, 
equalTo(1L)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); } { @@ -300,7 +300,7 @@ public class QueryPhaseTests extends ESTestCase { QueryPhase.execute(context, contextSearcher, null); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); context.setSize(0); @@ -309,7 +309,7 @@ public class QueryPhaseTests extends ESTestCase { QueryPhase.execute(context, contextSearcher, null); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); } { @@ -320,7 +320,7 @@ public class QueryPhaseTests extends ESTestCase { QueryPhase.execute(context, contextSearcher, null); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); } { @@ -331,7 +331,7 @@ public class QueryPhaseTests extends ESTestCase { QueryPhase.execute(context, contextSearcher, null); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1)); + assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); assertThat(collector.getTotalHits(), equalTo(1)); } @@ -377,7 +377,7 @@ public class QueryPhaseTests extends ESTestCase { QueryPhase.execute(context, contextSearcher, sort); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs)); + assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class)); FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0]; @@ -390,7 +390,7 @@ public class QueryPhaseTests extends ESTestCase { QueryPhase.execute(context, contextSearcher, sort); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs - 1)); + assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs - 1L)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); @@ -402,7 +402,7 @@ public class QueryPhaseTests extends ESTestCase { QueryPhase.execute(context, contextSearcher, sort); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs)); + assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class)); 
assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); @@ -416,7 +416,7 @@ public class QueryPhaseTests extends ESTestCase { QueryPhase.execute(context, contextSearcher, sort); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, lessThan(numDocs)); + assertThat(context.queryResult().topDocs().totalHits, lessThan((long) numDocs)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); @@ -427,7 +427,7 @@ public class QueryPhaseTests extends ESTestCase { QueryPhase.execute(context, contextSearcher, sort); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, lessThan(numDocs)); + assertThat(context.queryResult().topDocs().totalHits, lessThan((long) numDocs)); assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); @@ -475,20 +475,20 @@ public class QueryPhaseTests extends ESTestCase { }; QueryPhase.execute(context, contextSearcher, sort); - assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs)); + assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); assertTrue(collected.get()); assertNull(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(0)); - assertThat(context.queryResult().getTotalHits(), equalTo(numDocs)); + assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs)); int sizeMinus1 = context.queryResult().topDocs().scoreDocs.length - 1; FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[sizeMinus1]; QueryPhase.execute(context, contextSearcher, sort); - assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs)); + assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); assertTrue(collected.get()); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(0)); - assertThat(context.queryResult().getTotalHits(), equalTo(numDocs)); + assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs)); FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0]; for (int i = 0; i < sort.getSort().length; i++) { @SuppressWarnings("unchecked") diff --git a/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java b/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java index 05a72276362..bd8cfbcaa5a 100644 --- a/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java @@ -19,16 +19,6 @@ package org.elasticsearch.search.query; -import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; -import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; -import static 
org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; - import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; @@ -56,6 +46,16 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; +import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + public class QueryStringIT extends ESIntegTestCase { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -91,10 +91,6 @@ public class QueryStringIT extends ESIntegTestCase { resp = client().prepareSearch("test").setQuery(queryStringQuery("Bar")).get(); assertHitCount(resp, 3L); assertHits(resp.getHits(), "1", "2", "3"); - - resp = client().prepareSearch("test").setQuery(queryStringQuery("foa")).get(); - assertHitCount(resp, 1L); - assertHits(resp.getHits(), "3"); } public void testWithDate() throws Exception { @@ -161,8 +157,6 @@ public class QueryStringIT extends ESIntegTestCase { assertHits(resp.getHits(), "1"); resp = client().prepareSearch("test").setQuery(queryStringQuery("Baz")).get(); assertHits(resp.getHits(), "1"); - resp = client().prepareSearch("test").setQuery(queryStringQuery("sbaz")).get(); - assertHits(resp.getHits(), "1"); resp = client().prepareSearch("test").setQuery(queryStringQuery("19")).get(); assertHits(resp.getHits(), "1"); // nested doesn't match because it's hidden @@ -223,11 +217,11 @@ public class QueryStringIT extends ESIntegTestCase { indexRandom(true, false, reqs); SearchResponse resp = client().prepareSearch("test2").setQuery( - queryStringQuery("foo eggplent").defaultOperator(Operator.AND)).get(); + queryStringQuery("foo eggplant").defaultOperator(Operator.AND)).get(); assertHitCount(resp, 0L); resp = client().prepareSearch("test2").setQuery( - queryStringQuery("foo eggplent").defaultOperator(Operator.AND).useAllFields(true)).get(); + queryStringQuery("foo eggplant").defaultOperator(Operator.AND).useAllFields(true)).get(); assertHits(resp.getHits(), "1"); assertHitCount(resp, 1L); diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 51f61a7a9c3..01358dcfb89 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.query; import org.apache.lucene.util.English; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -41,16 +42,20 @@ import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; import org.elasticsearch.index.search.MatchQuery; import
org.elasticsearch.index.search.MatchQuery.Type; import org.elasticsearch.indices.TermsLookup; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.ISODateTimeFormat; import java.io.IOException; +import java.util.Collection; +import java.util.Collections; import java.util.Random; import java.util.concurrent.ExecutionException; @@ -102,6 +107,11 @@ import static org.hamcrest.Matchers.is; public class SearchQueryIT extends ESIntegTestCase { + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.singleton(InternalSettingsPlugin.class); + } + @Override protected int maximumNumberOfShards() { return 7; @@ -545,7 +555,7 @@ public class SearchQueryIT extends ESIntegTestCase { } public void testTypeFilter() throws Exception { - assertAcked(prepareCreate("test").setSettings("index.mapping.single_type", false)); + assertAcked(prepareCreate("test").setSettings("index.version.created", Version.V_5_6_0.id)); indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"), client().prepareIndex("test", "type2", "1").setSource("field1", "value1"), client().prepareIndex("test", "type1", "2").setSource("field1", "value1"), @@ -1181,7 +1191,36 @@ public class SearchQueryIT extends ESIntegTestCase { } public void testBasicQueryById() throws Exception { - assertAcked(prepareCreate("test").setSettings("index.mapping.single_type", false)); + assertAcked(prepareCreate("test")); + + client().prepareIndex("test", "doc", "1").setSource("field1", "value1").get(); + client().prepareIndex("test", "doc", "2").setSource("field1", "value2").get(); + client().prepareIndex("test", "doc", "3").setSource("field1", "value3").get(); + refresh(); + + SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery("doc").addIds("1", "2")).get(); + assertHitCount(searchResponse, 2L); + assertThat(searchResponse.getHits().getHits().length, equalTo(2)); + + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1")).get(); + assertHitCount(searchResponse, 1L); + assertThat(searchResponse.getHits().getHits().length, equalTo(1)); + + searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); + assertHitCount(searchResponse, 2L); + assertThat(searchResponse.getHits().getHits().length, equalTo(2)); + + searchResponse = client().prepareSearch().setQuery(idsQuery(Strings.EMPTY_ARRAY).addIds("1")).get(); + assertHitCount(searchResponse, 1L); + assertThat(searchResponse.getHits().getHits().length, equalTo(1)); + + searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2", "doc").addIds("1", "2", "3", "4")).get(); + assertHitCount(searchResponse, 3L); + assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + } + + public void testBasicQueryByIdMultiType() throws Exception { + assertAcked(prepareCreate("test").setSettings("index.version.created", Version.V_5_6_0.id)); client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get(); @@ -1212,6 +1251,7 @@ public class SearchQueryIT extends ESIntegTestCase {
assertThat(searchResponse.getHits().getHits().length, equalTo(2)); } + public void testNumericTermsAndRanges() throws Exception { assertAcked(prepareCreate("test") .addMapping("type1", @@ -1448,10 +1488,9 @@ public class SearchQueryIT extends ESIntegTestCase { public void testSimpleDFSQuery() throws IOException { assertAcked(prepareCreate("test") - .setSettings("index.mapping.single_type", false) - .addMapping("s", jsonBuilder() + .addMapping("doc", jsonBuilder() .startObject() - .startObject("s") + .startObject("doc") .startObject("_routing") .field("required", true) .endObject() @@ -1470,13 +1509,17 @@ public class SearchQueryIT extends ESIntegTestCase { .endObject() .endObject() .endObject()) - .addMapping("bs", "online", "type=boolean", "ts", "type=date,ignore_malformed=false,format=epoch_millis")); + ); - client().prepareIndex("test", "s", "1").setRouting("Y").setSource("online", false, "bs", "Y", "ts", System.currentTimeMillis() - 100).get(); - client().prepareIndex("test", "s", "2").setRouting("X").setSource("online", true, "bs", "X", "ts", System.currentTimeMillis() - 10000000).get(); - client().prepareIndex("test", "bs", "3").setSource("online", false, "ts", System.currentTimeMillis() - 100).get(); - client().prepareIndex("test", "bs", "4").setSource("online", true, "ts", System.currentTimeMillis() - 123123).get(); + client().prepareIndex("test", "doc", "1").setRouting("Y").setSource("online", false, "bs", "Y", "ts", + System.currentTimeMillis() - 100, "type", "s").get(); + client().prepareIndex("test", "doc", "2").setRouting("X").setSource("online", true, "bs", "X", "ts", + System.currentTimeMillis() - 10000000, "type", "s").get(); + client().prepareIndex("test", "doc", "3").setRouting(randomAlphaOfLength(2)) + .setSource("online", false, "ts", System.currentTimeMillis() - 100, "type", "bs").get(); + client().prepareIndex("test", "doc", "4").setRouting(randomAlphaOfLength(2)) + .setSource("online", true, "ts", System.currentTimeMillis() - 123123, "type", "bs").get(); refresh(); SearchResponse response = client().prepareSearch("test") @@ -1487,11 +1530,11 @@ public class SearchQueryIT extends ESIntegTestCase { .must(boolQuery() .should(boolQuery() .must(rangeQuery("ts").lt(System.currentTimeMillis() - (15 * 1000))) - .must(termQuery("_type", "bs")) + .must(termQuery("type", "bs")) ) .should(boolQuery() .must(rangeQuery("ts").lt(System.currentTimeMillis() - (15 * 1000))) - .must(termQuery("_type", "s")) + .must(termQuery("type", "s")) ) ) ) @@ -1620,29 +1663,33 @@ public class SearchQueryIT extends ESIntegTestCase { } public void testQueryStringWithSlopAndFields() { - assertAcked(prepareCreate("test").setSettings("index.mapping.single_type", false)); + assertAcked(prepareCreate("test")); - client().prepareIndex("test", "customer", "1").setSource("desc", "one two three").get(); - client().prepareIndex("test", "product", "2").setSource("desc", "one two three").get(); + client().prepareIndex("test", "doc", "1").setSource("desc", "one two three", "type", "customer").get(); + client().prepareIndex("test", "doc", "2").setSource("desc", "one two three", "type", "product").get(); refresh(); { SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")).get(); assertHitCount(searchResponse, 2); } { - SearchResponse searchResponse = client().prepareSearch("test").setTypes("product").setQuery(QueryBuilders.queryStringQuery("\"one two\"").field("desc")).get(); + SearchResponse searchResponse = 
client().prepareSearch("test").setPostFilter(QueryBuilders.termQuery("type", "customer")) + .setQuery(QueryBuilders.queryStringQuery("\"one two\"").field("desc")).get(); assertHitCount(searchResponse, 1); } { - SearchResponse searchResponse = client().prepareSearch("test").setTypes("product").setQuery(QueryBuilders.queryStringQuery("\"one three\"~5").field("desc")).get(); + SearchResponse searchResponse = client().prepareSearch("test").setPostFilter(QueryBuilders.termQuery("type", "product")) + .setQuery(QueryBuilders.queryStringQuery("\"one three\"~5").field("desc")).get(); assertHitCount(searchResponse, 1); } { - SearchResponse searchResponse = client().prepareSearch("test").setTypes("customer").setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")).get(); + SearchResponse searchResponse = client().prepareSearch("test").setPostFilter(QueryBuilders.termQuery("type", "customer")) + .setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")).get(); assertHitCount(searchResponse, 1); } { - SearchResponse searchResponse = client().prepareSearch("test").setTypes("customer").setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")).get(); + SearchResponse searchResponse = client().prepareSearch("test").setPostFilter(QueryBuilders.termQuery("type", "customer")) + .setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")).get(); assertHitCount(searchResponse, 1); } } diff --git a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index f22ec392b99..a32a8060379 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -398,10 +398,6 @@ public class SimpleQueryStringIT extends ESIntegTestCase { resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")).get(); assertHitCount(resp, 3L); assertHits(resp.getHits(), "1", "2", "3"); - - resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foa")).get(); - assertHitCount(resp, 1L); - assertHits(resp.getHits(), "3"); } public void testWithDate() throws Exception { @@ -480,8 +476,6 @@ public class SimpleQueryStringIT extends ESIntegTestCase { assertHits(resp.getHits(), "1"); resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("Baz")).get(); assertHits(resp.getHits(), "1"); - resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("sbaz")).get(); - assertHits(resp.getHits(), "1"); resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("19")).get(); assertHits(resp.getHits(), "1"); // nested doesn't match because it's hidden @@ -547,11 +541,11 @@ public class SimpleQueryStringIT extends ESIntegTestCase { indexRandom(true, false, reqs); SearchResponse resp = client().prepareSearch("test").setQuery( - simpleQueryStringQuery("foo eggplent").defaultOperator(Operator.AND)).get(); + simpleQueryStringQuery("foo eggplant").defaultOperator(Operator.AND)).get(); assertHitCount(resp, 0L); resp = client().prepareSearch("test").setQuery( - simpleQueryStringQuery("foo eggplent").defaultOperator(Operator.AND).useAllFields(true)).get(); + simpleQueryStringQuery("foo eggplant").defaultOperator(Operator.AND).useAllFields(true)).get(); assertHits(resp.getHits(), "1"); assertHitCount(resp, 1L); diff --git a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java 
b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java index a9d81c72f4a..8e6b9f45cc6 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java @@ -81,10 +81,8 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST @BeforeClass public static void init() throws IOException { - Path genericConfigFolder = createTempDir(); Settings baseSettings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(Environment.PATH_CONF_SETTING.getKey(), genericConfigFolder) .build(); Map<String, Function<Map<String, Object>, Object>> scripts = Collections.singletonMap("dummy", p -> null); ScriptEngine engine = new MockScriptEngine(MockScriptEngine.NAME, scripts); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java index 035fd847ad2..5142c25229d 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java @@ -445,8 +445,6 @@ public class SuggestSearchIT extends ESIntegTestCase { public void testPrefixLength() throws IOException { CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(Settings.builder() .put(SETTING_NUMBER_OF_SHARDS, 1) - .put("index.analysis.analyzer.reverse.tokenizer", "standard") - .putArray("index.analysis.analyzer.reverse.filter", "lowercase", "reverse") .put("index.analysis.analyzer.body.tokenizer", "standard") .putArray("index.analysis.analyzer.body.filter", "lowercase") .put("index.analysis.analyzer.bigram.tokenizer", "standard") @@ -458,7 +456,6 @@ public class SuggestSearchIT extends ESIntegTestCase { XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties") .startObject("body").field("type", "text").field("analyzer", "body").endObject() - .startObject("body_reverse").field("type", "text").field("analyzer", "reverse").endObject() .startObject("bigram").field("type", "text").field("analyzer", "bigram").endObject() .endObject() .endObject().endObject(); @@ -486,8 +483,6 @@ public class SuggestSearchIT extends ESIntegTestCase { public void testBasicPhraseSuggest() throws IOException, URISyntaxException { CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(Settings.builder() .put(indexSettings()) - .put("index.analysis.analyzer.reverse.tokenizer", "standard") - .putArray("index.analysis.analyzer.reverse.filter", "lowercase", "reverse") .put("index.analysis.analyzer.body.tokenizer", "standard") .putArray("index.analysis.analyzer.body.filter", "lowercase") .put("index.analysis.analyzer.bigram.tokenizer", "standard") @@ -503,10 +498,6 @@ public class SuggestSearchIT extends ESIntegTestCase { field("type", "text"). field("analyzer", "body") .endObject() - .startObject("body_reverse"). - field("type", "text"). - field("analyzer", "reverse") - .endObject() .startObject("bigram"). field("type", "text").
field("analyzer", "bigram") @@ -536,7 +527,7 @@ public class SuggestSearchIT extends ESIntegTestCase { "Police sergeant who stops the film", }; for (String line : strings) { - index("test", "type1", line, "body", line, "body_reverse", line, "bigram", line); + index("test", "type1", line, "body", line, "bigram", line); } refresh(); @@ -576,14 +567,6 @@ public class SuggestSearchIT extends ESIntegTestCase { searchSuggest = searchSuggest( "Arthur, King of the Britons", "simple_phrase", phraseSuggest); assertSuggestion(searchSuggest, 0, "simple_phrase", "arthur king of the britons"); - //test reverse suggestions with pre & post filter - phraseSuggest - .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always")) - .addCandidateGenerator(candidateGenerator("body_reverse").minWordLength(1).suggestMode("always").preFilter("reverse") - .postFilter("reverse")); - searchSuggest = searchSuggest( "Artur, Ging of the Britons", "simple_phrase", phraseSuggest); - assertSuggestion(searchSuggest, 0, "simple_phrase", "arthur king of the britons"); - // set all mass to trigrams (not indexed) phraseSuggest.clearCandidateGenerators() .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always")) @@ -633,8 +616,6 @@ public class SuggestSearchIT extends ESIntegTestCase { public void testSizeParam() throws IOException { CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(Settings.builder() .put(SETTING_NUMBER_OF_SHARDS, 1) - .put("index.analysis.analyzer.reverse.tokenizer", "standard") - .putArray("index.analysis.analyzer.reverse.filter", "lowercase", "reverse") .put("index.analysis.analyzer.body.tokenizer", "standard") .putArray("index.analysis.analyzer.body.filter", "lowercase") .put("index.analysis.analyzer.bigram.tokenizer", "standard") @@ -652,10 +633,6 @@ public class SuggestSearchIT extends ESIntegTestCase { .field("type", "text") .field("analyzer", "body") .endObject() - .startObject("body_reverse") - .field("type", "text") - .field("analyzer", "reverse") - .endObject() .startObject("bigram") .field("type", "text") .field("analyzer", "bigram") @@ -667,9 +644,9 @@ public class SuggestSearchIT extends ESIntegTestCase { ensureGreen(); String line = "xorr the god jewel"; - index("test", "type1", "1", "body", line, "body_reverse", line, "bigram", line); + index("test", "type1", "1", "body", line, "bigram", line); line = "I got it this time"; - index("test", "type1", "2", "body", line, "body_reverse", line, "bigram", line); + index("test", "type1", "2", "body", line, "bigram", line); refresh(); PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("bigram") diff --git a/core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java index 8c938caa479..f1a630fab37 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java @@ -20,10 +20,13 @@ package org.elasticsearch.search.suggest; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; 
import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.action.search.RestSearchAction; @@ -171,4 +174,22 @@ public class SuggestTests extends ESTestCase { } } + + public void testParsingExceptionOnUnknownSuggestion() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + { + builder.startArray("unknownSuggestion"); + builder.endArray(); + } + builder.endObject(); + BytesReference originalBytes = builder.bytes(); + try (XContentParser parser = createParser(builder.contentType().xContent(), originalBytes)) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + ParsingException ex = expectThrows(ParsingException.class, () -> Suggest.fromXContent(parser)); + assertEquals("Could not parse suggestion keyed as [unknownSuggestion]", ex.getMessage()); + } + } + + } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java index fbf6a889220..3c56597299d 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestionTests.java @@ -132,6 +132,7 @@ public class SuggestionTests extends ESTestCase { try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); + ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation); parsed = Suggestion.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); assertNull(parser.nextToken()); @@ -145,19 +146,18 @@ public class SuggestionTests extends ESTestCase { } /** - * test that we throw error if RestSearchAction.TYPED_KEYS_PARAM isn't set while rendering xContent + * test that we parse nothing if RestSearchAction.TYPED_KEYS_PARAM isn't set while rendering xContent and we cannot find + * suggestion type information */ - public void testFromXContentFailsWithoutTypeParam() throws IOException { + public void testFromXContentWithoutTypeParam() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); BytesReference originalBytes = toXContent(createTestItem(), xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); - ParsingException e = expectThrows(ParsingException.class, () -> Suggestion.fromXContent(parser)); - assertEquals( - "Cannot parse object of class [Suggestion] without type information. 
" - + "Set [typed_keys] parameter on the request to ensure the type information " - + "is added to the response output", e.getMessage()); + ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation); + assertNull(Suggestion.fromXContent(parser)); + ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser::getTokenLocation); } } @@ -177,6 +177,7 @@ public class SuggestionTests extends ESTestCase { try (XContentParser parser = xContent.createParser(xContentRegistry(), suggestionString)) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation); + ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation); ParsingException e = expectThrows(ParsingException.class, () -> Suggestion.fromXContent(parser)); assertEquals("Unknown Suggestion [unknownType]", e.getMessage()); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 76a7bcc1a8f..8ca37145952 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -208,19 +208,16 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest Client client = client(); createIndex("test-idx"); logger.info("--> add custom persistent metadata"); - updateClusterState(new ClusterStateUpdater() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - ClusterState.Builder builder = ClusterState.builder(currentState); - MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData()); - metadataBuilder.putCustom(SnapshottableMetadata.TYPE, new SnapshottableMetadata("before_snapshot_s")); - metadataBuilder.putCustom(NonSnapshottableMetadata.TYPE, new NonSnapshottableMetadata("before_snapshot_ns")); - metadataBuilder.putCustom(SnapshottableGatewayMetadata.TYPE, new SnapshottableGatewayMetadata("before_snapshot_s_gw")); - metadataBuilder.putCustom(NonSnapshottableGatewayMetadata.TYPE, new NonSnapshottableGatewayMetadata("before_snapshot_ns_gw")); - metadataBuilder.putCustom(SnapshotableGatewayNoApiMetadata.TYPE, new SnapshotableGatewayNoApiMetadata("before_snapshot_s_gw_noapi")); - builder.metaData(metadataBuilder); - return builder.build(); - } + updateClusterState(currentState -> { + ClusterState.Builder builder = ClusterState.builder(currentState); + MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData()); + metadataBuilder.putCustom(SnapshottableMetadata.TYPE, new SnapshottableMetadata("before_snapshot_s")); + metadataBuilder.putCustom(NonSnapshottableMetadata.TYPE, new NonSnapshottableMetadata("before_snapshot_ns")); + metadataBuilder.putCustom(SnapshottableGatewayMetadata.TYPE, new SnapshottableGatewayMetadata("before_snapshot_s_gw")); + metadataBuilder.putCustom(NonSnapshottableGatewayMetadata.TYPE, new NonSnapshottableGatewayMetadata("before_snapshot_ns_gw")); + metadataBuilder.putCustom(SnapshotableGatewayNoApiMetadata.TYPE, new SnapshotableGatewayNoApiMetadata("before_snapshot_s_gw_noapi")); + builder.metaData(metadataBuilder); + return builder.build(); }); logger.info("--> create repository"); @@ -235,27 +232,24 @@ public class 
DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); logger.info("--> change custom persistent metadata"); - updateClusterState(new ClusterStateUpdater() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - ClusterState.Builder builder = ClusterState.builder(currentState); - MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData()); - if (randomBoolean()) { - metadataBuilder.putCustom(SnapshottableMetadata.TYPE, new SnapshottableMetadata("after_snapshot_s")); - } else { - metadataBuilder.removeCustom(SnapshottableMetadata.TYPE); - } - metadataBuilder.putCustom(NonSnapshottableMetadata.TYPE, new NonSnapshottableMetadata("after_snapshot_ns")); - if (randomBoolean()) { - metadataBuilder.putCustom(SnapshottableGatewayMetadata.TYPE, new SnapshottableGatewayMetadata("after_snapshot_s_gw")); - } else { - metadataBuilder.removeCustom(SnapshottableGatewayMetadata.TYPE); - } - metadataBuilder.putCustom(NonSnapshottableGatewayMetadata.TYPE, new NonSnapshottableGatewayMetadata("after_snapshot_ns_gw")); - metadataBuilder.removeCustom(SnapshotableGatewayNoApiMetadata.TYPE); - builder.metaData(metadataBuilder); - return builder.build(); + updateClusterState(currentState -> { + ClusterState.Builder builder = ClusterState.builder(currentState); + MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData()); + if (randomBoolean()) { + metadataBuilder.putCustom(SnapshottableMetadata.TYPE, new SnapshottableMetadata("after_snapshot_s")); + } else { + metadataBuilder.removeCustom(SnapshottableMetadata.TYPE); } + metadataBuilder.putCustom(NonSnapshottableMetadata.TYPE, new NonSnapshottableMetadata("after_snapshot_ns")); + if (randomBoolean()) { + metadataBuilder.putCustom(SnapshottableGatewayMetadata.TYPE, new SnapshottableGatewayMetadata("after_snapshot_s_gw")); + } else { + metadataBuilder.removeCustom(SnapshottableGatewayMetadata.TYPE); + } + metadataBuilder.putCustom(NonSnapshottableGatewayMetadata.TYPE, new NonSnapshottableGatewayMetadata("after_snapshot_ns_gw")); + metadataBuilder.removeCustom(SnapshotableGatewayNoApiMetadata.TYPE); + builder.metaData(metadataBuilder); + return builder.build(); }); logger.info("--> delete repository"); @@ -510,15 +504,12 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2") .setIndices("test-idx-all", "test-idx-none", "test-idx-some") .setWaitForCompletion(false).setPartial(true).execute().actionGet(); - assertBusy(new Runnable() { - @Override - public void run() { - SnapshotsStatusResponse snapshotsStatusResponse = client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap-2").get(); - List<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots(); - assertEquals(snapshotStatuses.size(), 1); - logger.trace("current snapshot status [{}]", snapshotStatuses.get(0)); - assertTrue(snapshotStatuses.get(0).getState().completed()); - } + assertBusy(() -> { + SnapshotsStatusResponse snapshotsStatusResponse = client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap-2").get(); + List<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots(); + assertEquals(snapshotStatuses.size(), 1); + logger.trace("current snapshot status [{}]", snapshotStatuses.get(0)); +
assertTrue(snapshotStatuses.get(0).getState().completed()); }, 1, TimeUnit.MINUTES); SnapshotsStatusResponse snapshotsStatusResponse = client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap-2").get(); List<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots(); @@ -531,15 +522,12 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest // There is slight delay between snapshot being marked as completed in the cluster state and on the file system // After it was marked as completed in the cluster state - we need to check if it's completed on the file system as well - assertBusy(new Runnable() { - @Override - public void run() { - GetSnapshotsResponse response = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-2").get(); - assertThat(response.getSnapshots().size(), equalTo(1)); - SnapshotInfo snapshotInfo = response.getSnapshots().get(0); - assertTrue(snapshotInfo.state().completed()); - assertEquals(SnapshotState.PARTIAL, snapshotInfo.state()); - } + assertBusy(() -> { + GetSnapshotsResponse response = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-2").get(); + assertThat(response.getSnapshots().size(), equalTo(1)); + SnapshotInfo snapshotInfo = response.getSnapshots().get(0); + assertTrue(snapshotInfo.state().completed()); + assertEquals(SnapshotState.PARTIAL, snapshotInfo.state()); }, 1, TimeUnit.MINUTES); } else { logger.info("checking snapshot completion using wait_for_completion flag"); @@ -779,6 +767,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest assertEquals(0, snapshotInfo.failedShards()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/25281") public void testMasterShutdownDuringFailedSnapshot() throws Exception { logger.info("--> starting two master nodes and two data nodes"); internalCluster().startMasterOnlyNodes(2); diff --git a/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 44a134857f9..c872c4a39be 100644 --- a/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport; import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -72,6 +73,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -818,4 +820,90 @@ public class RemoteClusterConnectionTests extends ESTestCase { } } } + + public void testConnectedNodesConcurrentAccess() throws IOException, InterruptedException { + List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>(); + List<MockTransportService> discoverableTransports = new CopyOnWriteArrayList<>(); + try { + final int numDiscoverableNodes = randomIntBetween(5, 20); + List<DiscoveryNode> discoverableNodes = new ArrayList<>(numDiscoverableNodes); + for (int i = 0; i < numDiscoverableNodes; i++ ) { + MockTransportService transportService = startTransport("discoverable_node" + i, knownNodes, Version.CURRENT);
discoverableNodes.add(transportService.getLocalDiscoNode()); + discoverableTransports.add(transportService); + } + + List seedNodes = randomSubsetOf(discoverableNodes); + Collections.shuffle(seedNodes, random()); + + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", + seedNodes, service, Integer.MAX_VALUE, n -> true)) { + final int numGetThreads = randomIntBetween(4, 10); + final Thread[] getThreads = new Thread[numGetThreads]; + final int numModifyingThreads = randomIntBetween(4, 10); + final Thread[] modifyingThreads = new Thread[numModifyingThreads]; + CyclicBarrier barrier = new CyclicBarrier(numGetThreads + numModifyingThreads); + for (int i = 0; i < getThreads.length; i++) { + final int numGetCalls = randomIntBetween(1000, 10000); + getThreads[i] = new Thread(() -> { + try { + barrier.await(); + for (int j = 0; j < numGetCalls; j++) { + try { + DiscoveryNode node = connection.getConnectedNode(); + assertNotNull(node); + } catch (IllegalStateException e) { + if (e.getMessage().startsWith("No node available for cluster:") == false) { + throw e; + } + } + } + } catch (Exception ex) { + throw new AssertionError(ex); + } + }); + getThreads[i].start(); + } + + final AtomicInteger counter = new AtomicInteger(); + for (int i = 0; i < modifyingThreads.length; i++) { + final int numDisconnects = randomIntBetween(5, 10); + modifyingThreads[i] = new Thread(() -> { + try { + barrier.await(); + for (int j = 0; j < numDisconnects; j++) { + if (randomBoolean()) { + MockTransportService transportService = + startTransport("discoverable_node_added" + counter.incrementAndGet(), knownNodes, + Version.CURRENT); + discoverableTransports.add(transportService); + connection.addConnectedNode(transportService.getLocalDiscoNode()); + } else { + DiscoveryNode node = randomFrom(discoverableNodes); + connection.onNodeDisconnected(node); + } + } + } catch (Exception ex) { + throw new AssertionError(ex); + } + }); + modifyingThreads[i].start(); + } + + for (Thread thread : getThreads) { + thread.join(); + } + for (Thread thread : modifyingThreads) { + thread.join(); + } + } + } + } finally { + IOUtils.closeWhileHandlingException(discoverableTransports); + } + } + } diff --git a/core/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/core/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 0c4e0c31d6d..646efa9428d 100644 --- a/core/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -410,7 +410,12 @@ public class RemoteClusterServiceTests extends ESTestCase { }); failLatch.await(); assertNotNull(ex.get()); - assertTrue(ex.get().getClass().toString(), ex.get() instanceof TransportException); + if (ex.get() instanceof TransportException == false) { + // we have an issue for this see #25301 + logger.error("expected TransportException but got a different one see #25301", ex.get()); + } + assertTrue("expected TransportException but got a different one [" + ex.get().getClass().toString() + "]", + ex.get() instanceof TransportException); } } } diff --git a/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java b/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java index a68416cc25a..6ce6c2a96d6 100644 
--- a/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java +++ b/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java @@ -235,7 +235,7 @@ public class TCPTransportTests extends ESTestCase { } @Override - public long serverOpen() { + public long getNumOpenServerConnections() { return 0; } diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index f678d4528fe..62b4b3e9f2d 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -386,10 +386,10 @@ public class TribeIT extends ESIntegTestCase { public void testTribeOnOneCluster() throws Exception { try (Releasable tribeNode = startTribeNode()) { // Creates 2 indices, test1 on cluster1 and test2 on cluster2 - assertAcked(cluster1.client().admin().indices().prepareCreate("test1").setSettings("index.mapping.single_type", false)); + assertAcked(cluster1.client().admin().indices().prepareCreate("test1")); ensureGreen(cluster1.client()); - assertAcked(cluster2.client().admin().indices().prepareCreate("test2").setSettings("index.mapping.single_type", false)); + assertAcked(cluster2.client().admin().indices().prepareCreate("test2")); ensureGreen(cluster2.client()); // Wait for the tribe node to retrieve the indices into its cluster state @@ -411,21 +411,6 @@ public class TribeIT extends ESIntegTestCase { assertThat(clusterState.getMetaData().index("test2").mapping("type1"), notNullValue()); }); - // More documents with another type - indexRandom(true, - client().prepareIndex("test1", "type2", "1").setSource("field1", "value1"), - client().prepareIndex("test2", "type2", "1").setSource("field1", "value1") - ); - assertHitCount(client().prepareSearch().get(), 4L); - assertBusy(() -> { - ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - assertThat(clusterState.getMetaData().index("test1").mapping("type1"), notNullValue()); - assertThat(clusterState.getMetaData().index("test1").mapping("type2"), notNullValue()); - - assertThat(clusterState.getMetaData().index("test2").mapping("type1"), notNullValue()); - assertThat(clusterState.getMetaData().index("test2").mapping("type2"), notNullValue()); - }); - // Make sure master level write operations fail... 
(we don't really have a master) expectThrows(MasterNotDiscoveredException.class, () -> { client().admin().indices().prepareCreate("tribe_index").setMasterNodeTimeout("10ms").get(); diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java b/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java index d40c4865c90..ac9e3156e1c 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.tribe; +import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.io.stream.StreamInput; @@ -27,11 +28,14 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.env.Environment; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.node.MockNode; +import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TestCustomMetaData; +import org.elasticsearch.transport.MockTcpTransportPlugin; import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -64,11 +68,9 @@ public class TribeServiceTests extends ESTestCase { Settings globalSettings = Settings.builder() .put("node.name", "nodename") .put("path.home", "some/path") - .put("path.conf", "conf/path") .put("path.logs", "logs/path").build(); Settings clientSettings = TribeService.buildClientSettings("tribe1", "parent_id", globalSettings, Settings.EMPTY); assertEquals("some/path", clientSettings.get("path.home")); - assertEquals("conf/path", clientSettings.get("path.conf")); assertEquals("logs/path", clientSettings.get("path.logs")); Settings tribeSettings = Settings.builder() @@ -180,6 +182,28 @@ public class TribeServiceTests extends ESTestCase { assertEquals(mergedCustom.getData(), "data2"+String.valueOf(n)); } + public void testTribeNodeDeprecation() throws IOException { + final Path tempDir = createTempDir(); + Settings.Builder settings = Settings.builder() + .put("node.name", "test-node") + .put("path.home", tempDir) + .put(NetworkModule.HTTP_ENABLED.getKey(), false) + .put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), "mock-socket-network"); + + final boolean tribeServiceEnable = randomBoolean(); + if (tribeServiceEnable) { + String clusterName = "single-node-cluster"; + String tribeSetting = "tribe." 
+ clusterName + "."; + settings.put(tribeSetting + ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName) + .put(tribeSetting + NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), "mock-socket-network"); + } + try (Node node = new MockNode(settings.build(),Collections.singleton(MockTcpTransportPlugin.class) )) { + if (tribeServiceEnable) { + assertWarnings("tribe nodes are deprecated in favor of cross-cluster search and will be removed in Elasticsearch 7.0.0"); + } + } + } + static class MergableCustomMetaData1 extends TestCustomMetaData implements TribeService.MergableCustomMetaData { public static final String TYPE = "custom_md_1"; diff --git a/core/src/test/java/org/elasticsearch/update/UpdateIT.java b/core/src/test/java/org/elasticsearch/update/UpdateIT.java index 92234715c1e..dc46ef12e36 100644 --- a/core/src/test/java/org/elasticsearch/update/UpdateIT.java +++ b/core/src/test/java/org/elasticsearch/update/UpdateIT.java @@ -33,6 +33,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.Function; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteResponse; @@ -272,7 +273,7 @@ public class UpdateIT extends ESIntegTestCase { assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz")); assertThat(updateResponse.getGetResult().sourceAsMap().get("extra").toString(), equalTo("foo")); } - + public void testIndexAutoCreation() throws Exception { UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) @@ -461,7 +462,7 @@ public class UpdateIT extends ESIntegTestCase { public void testContextVariables() throws Exception { assertAcked(prepareCreate("test") - .setSettings("index.mapping.single_type", false) + .setSettings("index.version.created", Version.V_5_6_0.id) .addAlias(new Alias("alias")) .addMapping("type1", XContentFactory.jsonBuilder() .startObject() diff --git a/core/src/test/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider b/core/src/test/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider new file mode 100644 index 00000000000..8ec7461c667 --- /dev/null +++ b/core/src/test/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider @@ -0,0 +1 @@ +org.elasticsearch.plugins.spi.NamedXContentProviderTests$TestNamedXContentProvider \ No newline at end of file diff --git a/core/src/test/resources/indices/bwc/index-5.4.2.zip b/core/src/test/resources/indices/bwc/index-5.4.2.zip new file mode 100644 index 00000000000..c6aef9feeaa Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-5.4.2.zip differ diff --git a/core/src/test/resources/indices/bwc/index-5.4.3.zip b/core/src/test/resources/indices/bwc/index-5.4.3.zip new file mode 100644 index 00000000000..0ba6d1e0e1b Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-5.4.3.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-5.4.2.zip b/core/src/test/resources/indices/bwc/repo-5.4.2.zip new file mode 100644 index 00000000000..5e71fe33451 Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-5.4.2.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-5.4.3.zip b/core/src/test/resources/indices/bwc/repo-5.4.3.zip new file mode 100644 index 
00000000000..e1de034b67e Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-5.4.3.zip differ diff --git a/core/src/test/resources/org/elasticsearch/index/analysis/synonyms/synonyms.json b/core/src/test/resources/org/elasticsearch/index/analysis/synonyms/synonyms.json index fe5f4d4016c..9cb0bdd6ef1 100644 --- a/core/src/test/resources/org/elasticsearch/index/analysis/synonyms/synonyms.json +++ b/core/src/test/resources/org/elasticsearch/index/analysis/synonyms/synonyms.json @@ -3,11 +3,11 @@ "analysis":{ "analyzer":{ "synonymAnalyzer":{ - "tokenizer":"standard", + "tokenizer":"whitespace", "filter":[ "synonym" ] }, "synonymAnalyzer_file":{ - "tokenizer":"standard", + "tokenizer":"whitespace", "filter":[ "synonym_file" ] }, "synonymAnalyzerWordnet":{ @@ -21,6 +21,26 @@ "synonymAnalyzerWithsettings":{ "tokenizer":"trigram", "filter":["synonymWithTokenizerSettings"] + }, + "synonymAnalyzerWithStopBeforeSynonym": { + "tokenizer":"whitespace", + "filter":["stop","synonym"] + }, + "synonymAnalyzerWithStopAfterSynonym":{ + "tokenizer":"whitespace", + "filter":["synonym","stop"] + }, + "synonymAnalyzerWithStopSynonymAfterSynonym":{ + "tokenizer":"whitespace", + "filter":["synonym","stop_within_synonym"] + }, + "synonymAnalyzerExpand":{ + "tokenizer": "whitespace", + "filter":["synonym_expand"] + }, + "synonymAnalyzerExpandWithStopAfterSynonym":{ + "tokenizer": "whitespace", + "filter":["synonym_expand", "stop_within_synonym"] } }, "tokenizer":{ @@ -61,10 +81,23 @@ "type":"synonym", "synonyms":[ "kimchy => shay" - ], - "tokenizer" : "trigram", - "min_gram" : 3, - "max_gram" : 3 + ] + }, + "stop":{ + "type": "stop", + "stopwords":["stop","synonym"] + }, + "stop_within_synonym":{ + "type": "stop", + "stopwords":["kimchy", "elasticsearch"] + }, + "synonym_expand":{ + "type":"synonym", + "synonyms":[ + "kimchy , shay", + "dude , elasticsearch", + "abides , man!" 
+ ] } } } diff --git a/core/src/test/resources/org/elasticsearch/index/analysis/test1.json b/core/src/test/resources/org/elasticsearch/index/analysis/test1.json index 38937a9b5af..f2b60017721 100644 --- a/core/src/test/resources/org/elasticsearch/index/analysis/test1.json +++ b/core/src/test/resources/org/elasticsearch/index/analysis/test1.json @@ -17,10 +17,6 @@ }, "my":{ "type":"myfilter" - }, - "dict_dec":{ - "type":"dictionary_decompounder", - "word_list":["donau", "dampf", "schiff", "spargel", "creme", "suppe"] } }, "analyzer":{ @@ -43,10 +39,6 @@ "czechAnalyzerWithStemmer":{ "tokenizer":"standard", "filter":["standard", "lowercase", "stop", "czech_stem"] - }, - "decompoundingAnalyzer":{ - "tokenizer":"standard", - "filter":["dict_dec"] } } } diff --git a/core/src/test/resources/org/elasticsearch/index/analysis/test1.yml b/core/src/test/resources/org/elasticsearch/index/analysis/test1.yml index f7a57d14dbe..e9965467251 100644 --- a/core/src/test/resources/org/elasticsearch/index/analysis/test1.yml +++ b/core/src/test/resources/org/elasticsearch/index/analysis/test1.yml @@ -12,9 +12,6 @@ index : stopwords : [stop2-1, stop2-2] my : type : myfilter - dict_dec : - type : dictionary_decompounder - word_list : [donau, dampf, schiff, spargel, creme, suppe] analyzer : standard : type : standard @@ -34,6 +31,3 @@ index : czechAnalyzerWithStemmer : tokenizer : standard filter : [standard, lowercase, stop, czech_stem] - decompoundingAnalyzer : - tokenizer : standard - filter : [dict_dec] diff --git a/core/src/test/resources/org/elasticsearch/search/query/all-query-index-with-all.json b/core/src/test/resources/org/elasticsearch/search/query/all-query-index-with-all.json index 1a96fd71333..d9cbb485d13 100644 --- a/core/src/test/resources/org/elasticsearch/search/query/all-query-index-with-all.json +++ b/core/src/test/resources/org/elasticsearch/search/query/all-query-index-with-all.json @@ -6,22 +6,7 @@ "version": { "created": "5000099" }, - "analysis": { - "analyzer": { - "my_ngrams": { - "type": "custom", - "tokenizer": "standard", - "filter": ["my_ngrams"] - } - }, - "filter": { - "my_ngrams": { - "type": "ngram", - "min_gram": 2, - "max_gram": 2 - } - } - } + "query.default_field": "f1" } }, "mappings": { @@ -31,7 +16,7 @@ }, "properties": { "f1": {"type": "text"}, - "f2": {"type": "text", "analyzer": "my_ngrams"} + "f2": {"type": "text"} } } } diff --git a/core/src/test/resources/org/elasticsearch/search/query/all-query-index.json b/core/src/test/resources/org/elasticsearch/search/query/all-query-index.json index 86dde5aaf88..89c41217125 100644 --- a/core/src/test/resources/org/elasticsearch/search/query/all-query-index.json +++ b/core/src/test/resources/org/elasticsearch/search/query/all-query-index.json @@ -2,23 +2,7 @@ "settings": { "index": { "number_of_shards": 1, - "number_of_replicas": 0, - "analysis": { - "analyzer": { - "my_ngrams": { - "type": "custom", - "tokenizer": "standard", - "filter": ["my_ngrams"] - } - }, - "filter": { - "my_ngrams": { - "type": "ngram", - "min_gram": 2, - "max_gram": 2 - } - } - } + "number_of_replicas": 0 } }, "mappings": { @@ -26,7 +10,7 @@ "properties": { "f1": {"type": "text"}, "f2": {"type": "keyword"}, - "f3": {"type": "text", "analyzer": "my_ngrams"}, + "f3": {"type": "text"}, "f4": { "type": "text", "index_options": "docs" diff --git a/distribution/build.gradle b/distribution/build.gradle index b33ec60ee83..20064c7919c 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -496,6 +496,8 @@ task run(type: RunTask) { */ Map 
expansionsForDistribution(distributionType) { final String defaultHeapSize = "2g" + final String packagingPathData = "path.data: /var/lib/elasticsearch" + final String packagingPathLogs = "path.logs: /var/log/elasticsearch" String footer = "# Built for ${project.name}-${project.version} " + "(${distributionType})" @@ -509,6 +511,11 @@ Map expansionsForDistribution(distributionType) { 'integ-test-zip': '$ES_HOME/config', 'def': '/etc/elasticsearch', ], + 'path.data': [ + 'deb': packagingPathData, + 'rpm': packagingPathData, + 'def': '#path.data: /path/to/data' + ], 'path.env': [ 'deb': '/etc/default/elasticsearch', 'rpm': '/etc/sysconfig/elasticsearch', @@ -516,6 +523,11 @@ Map expansionsForDistribution(distributionType) { make an empty string here so the script can properly skip it. */ 'def': '', ], + 'path.logs': [ + 'deb': packagingPathLogs, + 'rpm': packagingPathLogs, + 'def': '#path.logs: /path/to/logs' + ], 'heap.min': defaultHeapSize, 'heap.max': defaultHeapSize, diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 1c833a104ce..5ffd513dc06 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -49,6 +49,9 @@ if (project.name == 'bwc-stable-snapshot') { if (enabled) { apply plugin: 'distribution' + // Not published so no need to assemble + tasks.remove(assemble) + build.dependsOn.remove('assemble') def (String major, String minor, String bugfix) = bwcVersion.split('\\.') def (String currentMajor, String currentMinor, String currentBugfix) = version.split('\\.') @@ -108,8 +111,8 @@ if (enabled) { commandLine = ['git', 'checkout', "upstream/${bwcBranch}"] } - File bwcDeb = file("${checkoutDir}/distribution/zip/build/distributions/elasticsearch-${bwcVersion}.deb") - File bwcRpm = file("${checkoutDir}/distribution/zip/build/distributions/elasticsearch-${bwcVersion}.rpm") + File bwcDeb = file("${checkoutDir}/distribution/deb/build/distributions/elasticsearch-${bwcVersion}.deb") + File bwcRpm = file("${checkoutDir}/distribution/rpm/build/distributions/elasticsearch-${bwcVersion}.rpm") File bwcZip = file("${checkoutDir}/distribution/zip/build/distributions/elasticsearch-${bwcVersion}.zip") task buildBwcVersion(type: GradleBuild) { dependsOn checkoutBwcBranch diff --git a/distribution/deb/src/main/packaging/init.d/elasticsearch b/distribution/deb/src/main/packaging/init.d/elasticsearch index 59fbef6f277..9623d363e5b 100755 --- a/distribution/deb/src/main/packaging/init.d/elasticsearch +++ b/distribution/deb/src/main/packaging/init.d/elasticsearch @@ -44,12 +44,6 @@ MAX_OPEN_FILES=65536 # Maximum amount of locked memory #MAX_LOCKED_MEMORY= -# Elasticsearch log directory -LOG_DIR=/var/log/$NAME - -# Elasticsearch data directory -DATA_DIR=/var/lib/$NAME - # Elasticsearch configuration directory CONF_DIR=/etc/$NAME @@ -81,7 +75,7 @@ fi # Define other required variables PID_FILE="$PID_DIR/$NAME.pid" DAEMON=$ES_HOME/bin/elasticsearch -DAEMON_OPTS="-d -p $PID_FILE -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR" +DAEMON_OPTS="-d -p $PID_FILE --path.conf $CONF_DIR" export ES_JAVA_OPTS export JAVA_HOME diff --git a/distribution/rpm/src/main/packaging/init.d/elasticsearch b/distribution/rpm/src/main/packaging/init.d/elasticsearch index 1eeb3431526..bedc5e4079c 100644 --- a/distribution/rpm/src/main/packaging/init.d/elasticsearch +++ b/distribution/rpm/src/main/packaging/init.d/elasticsearch @@ -35,8 +35,6 @@ fi ES_HOME="/usr/share/elasticsearch" MAX_OPEN_FILES=65536 MAX_MAP_COUNT=262144 
-LOG_DIR="/var/log/elasticsearch" -DATA_DIR="/var/lib/elasticsearch" CONF_DIR="${path.conf}" PID_DIR="/var/run/elasticsearch" @@ -114,7 +112,7 @@ start() { cd $ES_HOME echo -n $"Starting $prog: " # if not running, start it up here, usually something like "daemon $exec" - daemon --user elasticsearch --pidfile $pidfile $exec -p $pidfile -d -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR + daemon --user elasticsearch --pidfile $pidfile $exec -p $pidfile -d --path.conf $CONF_DIR retval=$? echo [ $retval -eq 0 ] && touch $lockfile diff --git a/distribution/src/main/packaging/env/elasticsearch b/distribution/src/main/packaging/env/elasticsearch index 11999ffc7b5..d8bf042caef 100644 --- a/distribution/src/main/packaging/env/elasticsearch +++ b/distribution/src/main/packaging/env/elasticsearch @@ -11,12 +11,6 @@ # Elasticsearch configuration directory #CONF_DIR=${path.conf} -# Elasticsearch data directory -#DATA_DIR=/var/lib/elasticsearch - -# Elasticsearch logs directory -#LOG_DIR=/var/log/elasticsearch - # Elasticsearch PID directory #PID_DIR=/var/run/elasticsearch diff --git a/distribution/src/main/packaging/systemd/elasticsearch.service b/distribution/src/main/packaging/systemd/elasticsearch.service index 623b41d7845..98fea5defad 100644 --- a/distribution/src/main/packaging/systemd/elasticsearch.service +++ b/distribution/src/main/packaging/systemd/elasticsearch.service @@ -7,8 +7,6 @@ After=network-online.target [Service] Environment=ES_HOME=/usr/share/elasticsearch Environment=CONF_DIR=${path.conf} -Environment=DATA_DIR=/var/lib/elasticsearch -Environment=LOG_DIR=/var/log/elasticsearch Environment=PID_DIR=/var/run/elasticsearch EnvironmentFile=-${path.env} @@ -22,9 +20,7 @@ ExecStartPre=/usr/share/elasticsearch/bin/elasticsearch-systemd-pre-exec ExecStart=/usr/share/elasticsearch/bin/elasticsearch \ -p ${PID_DIR}/elasticsearch.pid \ --quiet \ - -Edefault.path.logs=${LOG_DIR} \ - -Edefault.path.data=${DATA_DIR} \ - -Edefault.path.conf=${CONF_DIR} + --path.conf ${CONF_DIR} # StandardOutput is configured to redirect to journalctl since # some error messages may be logged in standard output before diff --git a/distribution/src/main/resources/bin/elasticsearch-plugin b/distribution/src/main/resources/bin/elasticsearch-plugin index 098d9124498..463521bcbdd 100755 --- a/distribution/src/main/resources/bin/elasticsearch-plugin +++ b/distribution/src/main/resources/bin/elasticsearch-plugin @@ -85,7 +85,7 @@ declare -a args=("$@") path_props=(-Des.path.home="$ES_HOME") if [ -e "$CONF_DIR" ]; then - path_props=("${path_props[@]}" -Des.path.conf="$CONF_DIR") + args=("${args[@]}" --path.conf "$CONF_DIR") fi exec "$JAVA" $ES_JAVA_OPTS -Delasticsearch "${path_props[@]}" -cp "$ES_HOME/lib/*" org.elasticsearch.plugins.PluginCli "${args[@]}" diff --git a/distribution/src/main/resources/bin/elasticsearch-service-x86.exe b/distribution/src/main/resources/bin/elasticsearch-service-x86.exe deleted file mode 100644 index 4240720018b..00000000000 Binary files a/distribution/src/main/resources/bin/elasticsearch-service-x86.exe and /dev/null differ diff --git a/distribution/src/main/resources/bin/elasticsearch-service.bat b/distribution/src/main/resources/bin/elasticsearch-service.bat index d06de4c5bea..e8fb6cd235a 100644 --- a/distribution/src/main/resources/bin/elasticsearch-service.bat +++ b/distribution/src/main/resources/bin/elasticsearch-service.bat @@ -27,34 +27,18 @@ if not "%CONF_FILE%" == "" goto conffileset set SCRIPT_DIR=%~dp0 for %%I in ("%SCRIPT_DIR%..") do set 
ES_HOME=%%~dpfI -%JAVA% -Xmx50M -version > nul 2>&1 - -if errorlevel 1 ( - echo Warning: Could not start JVM to detect version, defaulting to x86: - goto x86 -) - -%JAVA% -Xmx50M -version 2>&1 | "%windir%\System32\find" "64-Bit" >nul: - -if errorlevel 1 goto x86 set EXECUTABLE=%ES_HOME%\bin\elasticsearch-service-x64.exe set SERVICE_ID=elasticsearch-service-x64 set ARCH=64-bit -goto checkExe -:x86 -set EXECUTABLE=%ES_HOME%\bin\elasticsearch-service-x86.exe -set SERVICE_ID=elasticsearch-service-x86 -set ARCH=32-bit - -:checkExe if EXIST "%EXECUTABLE%" goto okExe -echo elasticsearch-service-(x86|x64).exe was not found... +echo elasticsearch-service-x64.exe was not found... +exit /B 1 :okExe set ES_VERSION=${project.version} -if "%LOG_DIR%" == "" set LOG_DIR=%ES_HOME%\logs +if "%SERVICE_LOG_DIR%" == "" set SERVICE_LOG_DIR=%ES_HOME%\logs if "x%1x" == "xx" goto displayUsage set SERVICE_CMD=%1 @@ -64,7 +48,7 @@ set SERVICE_ID=%1 :checkServiceCmd -if "%LOG_OPTS%" == "" set LOG_OPTS=--LogPath "%LOG_DIR%" --LogPrefix "%SERVICE_ID%" --StdError auto --StdOutput auto +if "%LOG_OPTS%" == "" set LOG_OPTS=--LogPath "%SERVICE_LOG_DIR%" --LogPrefix "%SERVICE_ID%" --StdError auto --StdOutput auto if /i %SERVICE_CMD% == install goto doInstall if /i %SERVICE_CMD% == remove goto doRemove @@ -222,11 +206,10 @@ if "%JVM_SS%" == "" ( ) CALL "%ES_HOME%\bin\elasticsearch.in.bat" -if "%DATA_DIR%" == "" set DATA_DIR=%ES_HOME%\data if "%CONF_DIR%" == "" set CONF_DIR=%ES_HOME%\config -set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.default.path.logs="%LOG_DIR%";-Des.default.path.data="%DATA_DIR%";-Des.default.path.conf="%CONF_DIR%" +set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%" if "%ES_START_TYPE%" == "" set ES_START_TYPE=manual if "%ES_STOP_TIMEOUT%" == "" set ES_STOP_TIMEOUT=0 @@ -240,7 +223,7 @@ if not "%SERVICE_USERNAME%" == "" ( ) ) -"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StopClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmMs %JVM_MS% --JvmMx %JVM_MX% --JvmSs %JVM_SS% --JvmOptions %ES_JAVA_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "%SERVICE_DISPLAY_NAME%" --Description "%SERVICE_DESCRIPTION%" --Jvm "%%JAVA_HOME%%%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%" %SERVICE_PARAMS% +"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StopClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmMs %JVM_MS% --JvmMx %JVM_MX% --JvmSs %JVM_SS% --JvmOptions %ES_JAVA_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "%SERVICE_DISPLAY_NAME%" --Description "%SERVICE_DESCRIPTION%" --Jvm "%%JAVA_HOME%%%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%" --StartParams --path.conf ++StartParams "%CONF_DIR%" %SERVICE_PARAMS% if not errorlevel 1 goto installed echo Failed installing '%SERVICE_ID%' service diff --git a/distribution/src/main/resources/bin/elasticsearch-translog b/distribution/src/main/resources/bin/elasticsearch-translog index 47a48f02b47..ac0c9bb3270 100755 --- a/distribution/src/main/resources/bin/elasticsearch-translog +++ b/distribution/src/main/resources/bin/elasticsearch-translog @@ -84,7 +84,7 @@ export HOSTNAME declare -a args=("$@") if [ -e 
"$CONF_DIR" ]; then - args=("${args[@]}" -Edefault.path.conf="$CONF_DIR") + args=("${args[@]}" --path.conf "$CONF_DIR") fi exec "$JAVA" $ES_JAVA_OPTS -Delasticsearch -Des.path.home="$ES_HOME" -cp "$ES_HOME/lib/*" org.elasticsearch.index.translog.TranslogToolCli "${args[@]}" diff --git a/distribution/src/main/resources/bin/elasticsearch.in.bat b/distribution/src/main/resources/bin/elasticsearch.in.bat index a2500833872..d7b985d0824 100644 --- a/distribution/src/main/resources/bin/elasticsearch.in.bat +++ b/distribution/src/main/resources/bin/elasticsearch.in.bat @@ -3,7 +3,7 @@ IF DEFINED JAVA_HOME ( set JAVA="%JAVA_HOME%\bin\java.exe" ) ELSE ( - FOR %%I IN (java.exe) DO set JAVA=%%~$PATH:I + FOR %%I IN (java.exe) DO set JAVA="%%~$PATH:I" ) IF NOT EXIST %JAVA% ( ECHO Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME 1>&2 diff --git a/distribution/src/main/resources/config/elasticsearch.yml b/distribution/src/main/resources/config/elasticsearch.yml index 15e841fe390..8d4527e39bd 100644 --- a/distribution/src/main/resources/config/elasticsearch.yml +++ b/distribution/src/main/resources/config/elasticsearch.yml @@ -30,11 +30,11 @@ # # Path to directory where to store the data (separate multiple locations by comma): # -#path.data: /path/to/data +${path.data} # # Path to log files: # -#path.logs: /path/to/logs +${path.logs} # # ----------------------------------- Memory ----------------------------------- # diff --git a/distribution/src/main/resources/config/jvm.options b/distribution/src/main/resources/config/jvm.options index 6d265fe7766..e0e362beea2 100644 --- a/distribution/src/main/resources/config/jvm.options +++ b/distribution/src/main/resources/config/jvm.options @@ -47,10 +47,10 @@ ## basic -# force the server VM (remove on 32-bit client JVMs) +# force the server VM -server -# explicitly set the stack size (reduce to 320k on 32-bit client JVMs) +# explicitly set the stack size -Xss1m # set to headless, just in case diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 5a6ae0d04eb..97e8421e98e 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -11,28 +11,15 @@ release-state can be: released | prerelease | unreleased :release-state: prerelease -:ref: https://www.elastic.co/guide/en/elasticsearch/reference/{branch} :defguide: https://www.elastic.co/guide/en/elasticsearch/guide/master :painless: https://www.elastic.co/guide/en/elasticsearch/painless/master :plugins: https://www.elastic.co/guide/en/elasticsearch/plugins/{branch} -:javaclient: https://www.elastic.co/guide/en/elasticsearch/client/java-api/{branch} -:xpack-ref: https://www.elastic.co/guide/en/x-pack/{branch} -:logstash: https://www.elastic.co/guide/en/logstash/{branch} -:kibana: https://www.elastic.co/guide/en/kibana/{branch} :issue: https://github.com/elastic/elasticsearch/issues/ :pull: https://github.com/elastic/elasticsearch/pull/ :docker-image: docker.elastic.co/elasticsearch/elasticsearch:{version} :plugin_url: https://artifacts.elastic.co/downloads/elasticsearch-plugins -:xpack: X-Pack -:xpackml: X-Pack machine learning -:ml: machine learning -:es: Elasticsearch -:kib: Kibana - -:xes-repo-dir: {docdir}/../../../elasticsearch-extra/x-pack-elasticsearch/docs/en - /////// Javadoc roots used to generate links from Painless's API reference /////// @@ -50,3 +37,30 @@ ifeval::["{release-state}"!="unreleased"] :elasticsearch-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/elasticsearch/{version} :painless-javadoc: 
https://artifacts.elastic.co/javadoc/org/elasticsearch/painless/lang-painless/{version} endif::[] + +////////// +The following attributes are synchronized across multiple books +////////// +:ref: https://www.elastic.co/guide/en/elasticsearch/reference/{branch} +:xpack-ref: https://www.elastic.co/guide/en/x-pack/{branch} +:logstash-ref: http://www.elastic.co/guide/en/logstash/{branch} +:kibana-ref: https://www.elastic.co/guide/en/kibana/{branch} +:stack-ref: http://www.elastic.co/guide/en/elastic-stack/{branch} +:javaclient: https://www.elastic.co/guide/en/elasticsearch/client/java-api/{branch} + +:xpack: X-Pack +:es: Elasticsearch +:kib: Kibana + +:security: X-Pack security +:monitoring: X-Pack monitoring +:watcher: Watcher +:reporting: X-Pack reporting +:graph: X-Pack graph +:searchprofiler: X-Pack search profiler +:xpackml: X-Pack machine learning +:ml: machine learning +:dfeed: datafeed +:dfeeds: datafeeds +:dfeed-cap: Datafeed +:dfeeds-cap: Datafeeds diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc index d95766bb190..03561a8b23a 100644 --- a/docs/plugins/analysis-icu.asciidoc +++ b/docs/plugins/analysis-icu.asciidoc @@ -37,6 +37,10 @@ normalization can be specified with the `name` parameter, which accepts `nfc`, `nfkc`, and `nfkc_cf` (default). Set the `mode` parameter to `decompose` to convert `nfc` to `nfd` or `nfkc` to `nfkd` respectively: +Which letters are normalized can be controlled by specifying the +`unicodeSetFilter` parameter, which accepts a +http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet]. + Here are two examples, the default usage and a customised character filter: @@ -189,6 +193,10 @@ without any further configuration. The type of normalization can be specified with the `name` parameter, which accepts `nfc`, `nfkc`, and `nfkc_cf` (default). +Which letters are normalized can be controlled by specifying the +`unicodeSetFilter` parameter, which accepts a +http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet]. + You should probably prefer the <>. Here are two examples, the default usage and a customised token filter: diff --git a/docs/plugins/integrations.asciidoc b/docs/plugins/integrations.asciidoc index 209c39a6196..4cfc5ab7539 100644 --- a/docs/plugins/integrations.asciidoc +++ b/docs/plugins/integrations.asciidoc @@ -41,13 +41,13 @@ releases 2.0 and later do not support rivers. [float] ==== Supported by Elasticsearch: -* {logstash}/plugins-outputs-elasticsearch.html[Logstash output to Elasticsearch]: +* {logstash-ref}/plugins-outputs-elasticsearch.html[Logstash output to Elasticsearch]: The Logstash `elasticsearch` output plugin. -* {logstash}/plugins-inputs-elasticsearch.html[Elasticsearch input to Logstash] +* {logstash-ref}/plugins-inputs-elasticsearch.html[Elasticsearch input to Logstash] The Logstash `elasticsearch` input plugin. -* {logstash}/plugins-filters-elasticsearch.html[Elasticsearch event filtering in Logstash] +* {logstash-ref}/plugins-filters-elasticsearch.html[Elasticsearch event filtering in Logstash] The Logstash `elasticsearch` filter plugin. -* {logstash}/plugins-codecs-es_bulk.html[Elasticsearch bulk codec] +* {logstash-ref}/plugins-codecs-es_bulk.html[Elasticsearch bulk codec] The Logstash `es_bulk` plugin decodes the Elasticsearch bulk format into individual events. 
[float] diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc index 20c62a5861a..933f6f69f62 100644 --- a/docs/plugins/repository-hdfs.asciidoc +++ b/docs/plugins/repository-hdfs.asciidoc @@ -76,6 +76,15 @@ The following settings are supported: the pattern with the hostname of the node at runtime (see link:repository-hdfs-security-runtime[Creating the Secure Repository]). +[[repository-hdfs-availability]] +[float] +===== A Note on HDFS Availablility +When you initialize a repository, its settings are persisted in the cluster state. When a node comes online, it will +attempt to initialize all repositories for which it has settings. If your cluster has an HDFS repository configured, then +all nodes in the cluster must be able to reach HDFS when starting. If not, then the node will fail to initialize the +repository at start up and the repository will be unusable. If this happens, you will need to remove and re-add the +repository or restart the offending node. + [[repository-hdfs-security]] ==== Hadoop Security diff --git a/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc index f386af209b1..fec2fe41d4f 100644 --- a/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc @@ -20,10 +20,10 @@ A `percentiles_bucket` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE -.`sum_bucket` Parameters +.`percentiles_bucket` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the sum for (see <> for more +|`buckets_path` |The path to the buckets we wish to find the percentiles for (see <> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <> for more details)|Optional | `skip` diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc index e674f8bf0e7..8221ae7cf3f 100644 --- a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc @@ -241,13 +241,13 @@ The output from the above is: }, "hits": { "total": 1, - "max_score": 0.2824934, + "max_score": 0.2876821, "hits": [ { "_index": "my_index", "_type": "my_type", "_id": "1", - "_score": 0.2824934, + "_score": 0.2876821, "_source": { "text": "The fooBarBaz method" }, diff --git a/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc index 33191805fe6..e178181d147 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc @@ -46,7 +46,7 @@ Where the file looks like: [source,stemmer_override] -------------------------------------------------- -include::{docdir}/../src/test/cluster/config/analysis/stemmer_override.txt[] +include::{es-test-dir}/cluster/config/analysis/stemmer_override.txt[] -------------------------------------------------- You can also define the overrides rules inline: diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc 
index 09707fdeb1c..e1f77332fd4 100644 --- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc @@ -50,11 +50,14 @@ PUT /test_index The above configures a `search_synonyms` filter, with a path of `analysis/synonym.txt` (relative to the `config` location). The `search_synonyms` analyzer is then configured with the filter. -Additional settings are: `ignore_case` (defaults to `false`), and -`expand` (defaults to `true`). +Additional settings are: `expand` (defaults to `true`). + +[float] +==== `tokenizer` and `ignore_case` are deprecated The `tokenizer` parameter controls the tokenizers that will be used to -tokenize the synonym, and defaults to the `whitespace` tokenizer. +tokenize the synonym, this parameter is for backwards compatibility for indices that created before 6.0.. +The `ignore_case` parameter works with `tokenizer` parameter only. Two synonym formats are supported: Solr, WordNet. @@ -65,7 +68,7 @@ The following is a sample format of the file: [source,synonyms] -------------------------------------------------- -include::{docdir}/../src/test/cluster/config/analysis/synonym.txt[] +include::{es-test-dir}/cluster/config/analysis/synonym.txt[] -------------------------------------------------- You can also define synonyms for the filter directly in the diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc index c4961d1e5f9..68d3f444b2d 100644 --- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc @@ -34,11 +34,17 @@ PUT /test_index The above configures a `synonym` filter, with a path of `analysis/synonym.txt` (relative to the `config` location). The `synonym` analyzer is then configured with the filter. Additional -settings are: `ignore_case` (defaults to `false`), and `expand` -(defaults to `true`). +settings is: `expand` (defaults to `true`). + +This filter tokenize synonyms with whatever tokenizer and token filters +appear before it in the chain. + +[float] +==== `tokenizer` and `ignore_case` are deprecated The `tokenizer` parameter controls the tokenizers that will be used to -tokenize the synonym, and defaults to the `whitespace` tokenizer. +tokenize the synonym, this parameter is for backwards compatibility for indices that created before 6.0.. +The `ignore_case` parameter works with `tokenizer` parameter only. Two synonym formats are supported: Solr, WordNet. @@ -49,7 +55,7 @@ The following is a sample format of the file: [source,synonyms] -------------------------------------------------- -include::{docdir}/../src/test/cluster/config/analysis/synonym.txt[] +include::{es-test-dir}/cluster/config/analysis/synonym.txt[] -------------------------------------------------- You can also define synonyms for the filter directly in the diff --git a/docs/reference/analysis/tokenizers.asciidoc b/docs/reference/analysis/tokenizers.asciidoc index f1e0899d7ab..add0abdec01 100644 --- a/docs/reference/analysis/tokenizers.asciidoc +++ b/docs/reference/analysis/tokenizers.asciidoc @@ -99,14 +99,14 @@ terms. <>:: -The `simplepattern` tokenizer uses a regular expression to capture matching +The `simple_pattern` tokenizer uses a regular expression to capture matching text as terms. It uses a restricted subset of regular expression features and is generally faster than the `pattern` tokenizer. 
<>:: -The `simplepatternsplit` tokenizer uses the same restricted regular expression -subset as the `simplepattern` tokenizer, but splits the input at matches rather +The `simple_pattern_split` tokenizer uses the same restricted regular expression +subset as the `simple_pattern` tokenizer, but splits the input at matches rather than returning the matches as terms. <>:: diff --git a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc index 3ef526325e7..b43b4518b8d 100644 --- a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc @@ -300,13 +300,13 @@ GET my_index/_search }, "hits": { "total": 1, - "max_score": 0.51623213, + "max_score": 0.5753642, "hits": [ { "_index": "my_index", "_type": "doc", "_id": "1", - "_score": 0.51623213, + "_score": 0.5753642, "_source": { "title": "Quick Foxes" } diff --git a/docs/reference/analysis/tokenizers/simplepattern-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/simplepattern-tokenizer.asciidoc index bee92c75d26..3f235fa6358 100644 --- a/docs/reference/analysis/tokenizers/simplepattern-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/simplepattern-tokenizer.asciidoc @@ -3,7 +3,7 @@ experimental[] -The `simplepattern` tokenizer uses a regular expression to capture matching +The `simple_pattern` tokenizer uses a regular expression to capture matching text as terms. The set of regular expression features it supports is more limited than the <> tokenizer, but the tokenization is generally faster. @@ -11,7 +11,7 @@ tokenization is generally faster. This tokenizer does not support splitting the input on a pattern match, unlike the <> tokenizer. To split on pattern matches using the same restricted regular expression subset, see the -<> tokenizer. +<> tokenizer. This tokenizer uses {lucene-core-javadoc}/org/apache/lucene/util/automaton/RegExp.html[Lucene regular expressions]. For an explanation of the supported features and syntax, see <>. @@ -22,7 +22,7 @@ tokenizer should always be configured with a non-default pattern. [float] === Configuration -The `simplepattern` tokenizer accepts the following parameters: +The `simple_pattern` tokenizer accepts the following parameters: [horizontal] `pattern`:: @@ -31,7 +31,7 @@ The `simplepattern` tokenizer accepts the following parameters: [float] === Example configuration -This example configures the `simplepattern` tokenizer to produce terms that are +This example configures the `simple_pattern` tokenizer to produce terms that are three-digit numbers [source,js] @@ -47,7 +47,7 @@ PUT my_index }, "tokenizer": { "my_tokenizer": { - "type": "simplepattern", + "type": "simple_pattern", "pattern": "[0123456789]{3}" } } diff --git a/docs/reference/analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc index c009f8cb7a4..59b77936cb9 100644 --- a/docs/reference/analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc @@ -3,14 +3,14 @@ experimental[] -The `simplepatternsplit` tokenizer uses a regular expression to split the +The `simple_pattern_split` tokenizer uses a regular expression to split the input into terms at pattern matches. The set of regular expression features it supports is more limited than the <> tokenizer, but the tokenization is generally faster. 
This tokenizer does not produce terms from the matches themselves. To produce terms from matches using patterns in the same restricted regular expression -subset, see the <> +subset, see the <> tokenizer. This tokenizer uses {lucene-core-javadoc}/org/apache/lucene/util/automaton/RegExp.html[Lucene regular expressions]. @@ -23,7 +23,7 @@ pattern. [float] === Configuration -The `simplepatternsplit` tokenizer accepts the following parameters: +The `simple_pattern_split` tokenizer accepts the following parameters: [horizontal] `pattern`:: @@ -32,7 +32,7 @@ The `simplepatternsplit` tokenizer accepts the following parameters: [float] === Example configuration -This example configures the `simplepatternsplit` tokenizer to split the input +This example configures the `simple_pattern_split` tokenizer to split the input text on underscores. [source,js] @@ -48,7 +48,7 @@ PUT my_index }, "tokenizer": { "my_tokenizer": { - "type": "simplepatternsplit", + "type": "simple_pattern_split", "pattern": "_" } } diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 7176f885831..2493988b783 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -20,8 +20,8 @@ API, unless otherwise specified. Most APIs that refer to an `index` parameter support execution across multiple indices, using simple `test1,test2,test3` notation (or `_all` for all indices). It also -support wildcards, for example: `test*` or `*test` or `te*t` or `*test*`, and the ability to "add" (`+`) -and "remove" (`-`), for example: `+test*,-test3`. +support wildcards, for example: `test*` or `*test` or `te*t` or `*test*`, and the +ability to "exclude" (`-`), for example: `test*,-test3`. All multi indices API support the following url query string parameters: diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index 1dbcd455351..95621110916 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -71,7 +71,10 @@ veJR 127.0.0.1 59938 {version} * |`version` |`v` |No |Elasticsearch version |{version} |`build` |`b` |No |Elasticsearch Build hash |5c03844 |`jdk` |`j` |No |Running Java version |1.8.0 -|`disk.avail` |`d`, `disk`, `diskAvail` |No |Available disk space |1.8gb +|`disk.total` |`dt`, `diskTotal` |No |Total disk space| 458.3gb +|`disk.used` |`du`, `diskUsed` |No |Used disk space| 259.8gb +|`disk.avail` |`d`, `disk`, `diskAvail` |No |Available disk space |198.4gb +|`disk.used_percent` |`dup`, `diskUsedPercent` |No |Used disk space percentage |56.71 |`heap.current` |`hc`, `heapCurrent` |No |Used heap |311.2mb |`heap.percent` |`hp`, `heapPercent` |Yes |Used heap percentage |7 |`heap.max` |`hm`, `heapMax` |No |Maximum configured heap |1015.6mb diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 608a462e1f3..ee3b30d704e 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -111,9 +111,14 @@ java -version echo $JAVA_HOME -------------------------------------------------- -Once we have Java set up, we can then download and run Elasticsearch. The binaries are available from http://www.elastic.co/downloads[`www.elastic.co/downloads`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, or a `DEB` or `RPM` package. For simplicity, let's use the tar file. +Once we have Java set up, we can then download and run Elasticsearch. 
The binaries are available from http://www.elastic.co/downloads[`www.elastic.co/downloads`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, a `DEB` or `RPM` package, or a Windows `MSI` installation package. -Let's download the Elasticsearch {version} tar as follows (Windows users should download the zip package): +[float] +=== Installation example with tar + +For simplicity, let's use the <> file. + +Let's download the Elasticsearch {version} tar as follows: ["source","sh",subs="attributes,callouts"] -------------------------------------------------- @@ -121,7 +126,7 @@ curl -L -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{v -------------------------------------------------- // NOTCONSOLE -Then extract it as follows (Windows users should unzip the zip package): +Then extract it as follows: ["source","sh",subs="attributes,callouts"] -------------------------------------------------- @@ -135,14 +140,74 @@ It will then create a bunch of files and folders in your current directory. We t cd elasticsearch-{version}/bin -------------------------------------------------- -And now we are ready to start our node and single cluster (Windows users should run the elasticsearch.bat file): +And now we are ready to start our node and single cluster: [source,sh] -------------------------------------------------- ./elasticsearch -------------------------------------------------- -If everything goes well, you should see a bunch of messages that look like below: +[float] +=== Installation example with MSI Windows Installer + +For Windows users, we recommend using the <>. The package contains a graphical user interface (GUI) that guides you through the installation process. + +First, download the Elasticsearch {version} MSI from +https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.msi. + +Then double-click the downloaded file to launch the GUI. Within the first screen, select the deployment directories: + +[[getting-started-msi-installer-locations]] +image::images/msi_installer/msi_installer_locations.png[] + +Then select whether to install as a service or start Elasticsearch manually as needed. +To align with the tar example, choose not to install as a service: + +[[getting-started-msi-installer-service]] +image::images/msi_installer/msi_installer_no_service.png[] + +For configuration, simply leave the default values: + +[[getting-started-msi-installer-configuration]] +image::images/msi_installer/msi_installer_configuration.png[] + +Again, to align with the tar example, uncheck all plugins to not install any plugins: + +[[getting-started-msi-installer-plugins]] +image::images/msi_installer/msi_installer_plugins.png[] + +After clicking the install button, Elasticsearch will be installed: + +[[getting-started-msi-installer-success]] +image::images/msi_installer/msi_installer_success.png[] + +By default, Elasticsearch will be installed at `%PROGRAMFILES%\Elastic\Elasticsearch`. 
Navigate here and go into the bin directory as follows: + +**with Command Prompt:** + +[source,sh] +-------------------------------------------------- +cd %PROGRAMFILES%\Elastic\Elasticsearch\bin +-------------------------------------------------- + +**with PowerShell:** + +[source,powershell] +-------------------------------------------------- +cd $env:PROGRAMFILES\Elastic\Elasticsearch\bin +-------------------------------------------------- + +And now we are ready to start our node and single cluster: + +[source,sh] +-------------------------------------------------- +.\elasticsearch.exe +-------------------------------------------------- + +[float] +=== Successfully running node + +If everything goes well with installation, you should see a bunch of messages that look like below: ["source","sh",subs="attributes,callouts"] -------------------------------------------------- @@ -199,7 +264,7 @@ Now that we have our node (and cluster) up and running, the next step is to unde Let's start with a basic health check, which we can use to see how our cluster is doing. We'll be using curl to do this but you can use any tool that allows you to make HTTP/REST calls. Let's assume that we are still on the same node where we started Elasticsearch on and open another command shell window. To check the cluster health, we will be using the <>. You can -run the command below in {kibana}/console-kibana.html[Kibana's Console] +run the command below in {kibana-ref}/console-kibana.html[Kibana's Console] by clicking "VIEW IN CONSOLE" or with `curl` by clicking the "COPY AS CURL" link below and pasting it into a terminal. diff --git a/docs/reference/how-to/disk-usage.asciidoc b/docs/reference/how-to/disk-usage.asciidoc index 59e82d7efe1..a9dd7501a72 100644 --- a/docs/reference/how-to/disk-usage.asciidoc +++ b/docs/reference/how-to/disk-usage.asciidoc @@ -158,3 +158,24 @@ on disk usage. In particular, integers should be stored using an integer type stored in a `scaled_float` if appropriate or in the smallest type that fits the use-case: using `float` over `double`, or `half_float` over `float` will help save storage. + +[float] +=== Use index sorting to colocate similar documents + +When Elasticsearch stores `_source`, it compresses multiple documents at once +in order to improve the overall compression ratio. For instance it is very +common that documents share the same field names, and quite common that they +share some field values, especially on fields that have a low cardinality or +a https://en.wikipedia.org/wiki/Zipf%27s_law[zipfian] distribution. + +By default documents are compressed together in the order that they are added +to the index. If you enabled <> +then instead they are compressed in sorted order. Sorting documents with similar +structure, fields, and values together should improve the compression ratio. + +[float] +=== Put fields in the same order in documents + +Due to the fact that multiple documents are compressed together into blocks, +it is more likely to find longer duplicate strings in those `_source` documents +if fields always occur in the same order. 
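As a concrete illustration of the index-sorting tip added above, the sketch below creates an index that sorts documents by a `timestamp` field at index time. This is only an editorial example, not part of the change: the index name and field name are placeholders, and it assumes the standard `index.sort.*` settings.

[source,js]
--------------------------------------------------
PUT compressed_logs
{
  "settings": {
    "index": {
      "sort.field": "timestamp", <1>
      "sort.order": "desc"
    }
  },
  "mappings": {
    "doc": {
      "properties": {
        "timestamp": { "type": "date" }
      }
    }
  }
}
--------------------------------------------------

<1> Documents are rewritten in `timestamp` order inside each segment, so documents with similar values sit next to each other and compress better.

Note that the sort field must be a doc-values-enabled field such as a `date`, `keyword`, or numeric type.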
diff --git a/docs/reference/how-to/recipes.asciidoc b/docs/reference/how-to/recipes.asciidoc index 4d1a4b67a2b..913fb80bea6 100644 --- a/docs/reference/how-to/recipes.asciidoc +++ b/docs/reference/how-to/recipes.asciidoc @@ -88,13 +88,13 @@ GET index/_search }, "hits": { "total": 2, - "max_score": 0.25811607, + "max_score": 0.2876821, "hits": [ { "_index": "index", "_type": "type", "_id": "2", - "_score": 0.25811607, + "_score": 0.2876821, "_source": { "body": "A pair of skis" } @@ -103,7 +103,7 @@ GET index/_search "_index": "index", "_type": "type", "_id": "1", - "_score": 0.25811607, + "_score": 0.2876821, "_source": { "body": "Ski resort" } @@ -145,13 +145,13 @@ GET index/_search }, "hits": { "total": 1, - "max_score": 0.25811607, + "max_score": 0.2876821, "hits": [ { "_index": "index", "_type": "type", "_id": "1", - "_score": 0.25811607, + "_score": 0.2876821, "_source": { "body": "Ski resort" } @@ -201,13 +201,13 @@ GET index/_search }, "hits": { "total": 1, - "max_score": 0.25811607, + "max_score": 0.2876821, "hits": [ { "_index": "index", "_type": "type", "_id": "1", - "_score": 0.25811607, + "_score": 0.2876821, "_source": { "body": "Ski resort" } diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index 42a03dd8fd2..60168ab856d 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -326,3 +326,45 @@ queries, they should be mapped as a `keyword`. <> can be useful in order to make conjunctions faster at the cost of slightly slower indexing. Read more about it in the <>. + +[float] +=== Use `preference` to optimize cache utilization + +There are multiple caches that can help with search performance, such as the +https://en.wikipedia.org/wiki/Page_cache[filesystem cache], the +<> or the <>. Yet +all these caches are maintained at the node level, meaning that if you run the +same request twice in a row, have 1 <> or more +and use https://en.wikipedia.org/wiki/Round-robin_DNS[round-robin], the default +routing algorithm, then those two requests will go to different shard copies, +preventing node-level caches from helping. + +Since it is common for users of a search application to run similar requests +one after another, for instance in order to analyze a narrower subset of the +index, using a preference value that identifies the current user or session +could help optimize usage of the caches. + +[float] +=== Replicas might help with throughput, but not always + +In addition to improving resiliency, replicas can help improve throughput. For +instance if you have a single-shard index and three nodes, you will need to +set the number of replicas to 2 in order to have 3 copies of your shard in +total so that all nodes are utilized. + +Now imagine that you have a 2-shards index and two nodes. In one case, the +number of replicas is 0, meaning that each node holds a single shard. In the +second case the number of replicas is 1, meaning that each node has two shards. +Which setup is going to perform best in terms of search performance? Usually, +the setup that has fewer shards per node in total will perform better. The +reason for that is that it gives a greater share of the available filesystem +cache to each shard, and the filesystem cache is probably Elasticsearch's +number 1 performance factor. At the same time, beware that a setup that does +not have replicas is subject to failure in case of a single node failure, so +there is a trade-off between throughput and availability. 
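As a usage sketch for the `preference` tip above (the index, field, and preference value here are placeholders, not part of this change), a search application could route every request for a given user to the same shard copies like this:

[source,js]
--------------------------------------------------
GET my_index/_search?preference=user_12345
{
  "query": {
    "match": { "body": "ski resort" }
  }
}
--------------------------------------------------

Because the same custom `preference` string always resolves to the same shard copies, repeated requests from that user keep hitting warm node-level caches instead of bouncing between replicas.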
+ +So what is the right number of replicas? If you have a cluster that has +`num_nodes` nodes, `num_primaries` primary shards _in total_ and if you want to +be able to cope with `max_failures` node failures at once at most, then the +right number of replicas for you is +`max(max_failures, ceil(num_nodes / num_primaries) - 1)`. diff --git a/docs/reference/images/msi_installer/elasticsearch_exe.png b/docs/reference/images/msi_installer/elasticsearch_exe.png new file mode 100644 index 00000000000..1c0aacb58c0 Binary files /dev/null and b/docs/reference/images/msi_installer/elasticsearch_exe.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_configuration.png b/docs/reference/images/msi_installer/msi_installer_configuration.png new file mode 100644 index 00000000000..d5502dc7cca Binary files /dev/null and b/docs/reference/images/msi_installer/msi_installer_configuration.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_help.png b/docs/reference/images/msi_installer/msi_installer_help.png new file mode 100644 index 00000000000..9b63c512100 Binary files /dev/null and b/docs/reference/images/msi_installer/msi_installer_help.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_installed_service.png b/docs/reference/images/msi_installer/msi_installer_installed_service.png new file mode 100644 index 00000000000..34585377a91 Binary files /dev/null and b/docs/reference/images/msi_installer/msi_installer_installed_service.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_locations.png b/docs/reference/images/msi_installer/msi_installer_locations.png new file mode 100644 index 00000000000..6ccb3dcb23f Binary files /dev/null and b/docs/reference/images/msi_installer/msi_installer_locations.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_no_service.png b/docs/reference/images/msi_installer/msi_installer_no_service.png new file mode 100644 index 00000000000..d26bb75f265 Binary files /dev/null and b/docs/reference/images/msi_installer/msi_installer_no_service.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_plugins.png b/docs/reference/images/msi_installer/msi_installer_plugins.png new file mode 100644 index 00000000000..fc3b29677dd Binary files /dev/null and b/docs/reference/images/msi_installer/msi_installer_plugins.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_selected_plugins.png b/docs/reference/images/msi_installer/msi_installer_selected_plugins.png new file mode 100644 index 00000000000..c39631eb6cc Binary files /dev/null and b/docs/reference/images/msi_installer/msi_installer_selected_plugins.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_service.png b/docs/reference/images/msi_installer/msi_installer_service.png new file mode 100644 index 00000000000..a964abf6ed5 Binary files /dev/null and b/docs/reference/images/msi_installer/msi_installer_service.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_success.png b/docs/reference/images/msi_installer/msi_installer_success.png new file mode 100644 index 00000000000..1c391e3dfd1 Binary files /dev/null and b/docs/reference/images/msi_installer/msi_installer_success.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_uninstall.png b/docs/reference/images/msi_installer/msi_installer_uninstall.png new file mode 100644 index 00000000000..d612b51d7a1 Binary files /dev/null and 
b/docs/reference/images/msi_installer/msi_installer_uninstall.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_upgrade_configuration.png b/docs/reference/images/msi_installer/msi_installer_upgrade_configuration.png new file mode 100644 index 00000000000..04a5e92cc39 Binary files /dev/null and b/docs/reference/images/msi_installer/msi_installer_upgrade_configuration.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_upgrade_notice.png b/docs/reference/images/msi_installer/msi_installer_upgrade_notice.png new file mode 100644 index 00000000000..2b3ac7ea118 Binary files /dev/null and b/docs/reference/images/msi_installer/msi_installer_upgrade_notice.png differ diff --git a/docs/reference/images/msi_installer/msi_installer_upgrade_plugins.png b/docs/reference/images/msi_installer/msi_installer_upgrade_plugins.png new file mode 100644 index 00000000000..ef6e93bb849 Binary files /dev/null and b/docs/reference/images/msi_installer/msi_installer_upgrade_plugins.png differ diff --git a/docs/reference/index-all.asciidoc b/docs/reference/index-all.asciidoc deleted file mode 100644 index 65346330374..00000000000 --- a/docs/reference/index-all.asciidoc +++ /dev/null @@ -1,6 +0,0 @@ -[[elasticsearch-reference]] -= Elasticsearch Reference - -:include-xpack: true - -include::index-shared.asciidoc[] diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 9f1999d6acf..ed91307e2c9 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -120,7 +120,7 @@ specific index module: `index.max_rescore_window`:: - The maximum value of `window_size` for `rescore`s in searches of this index. + The maximum value of `window_size` for `rescore` requests in searches of this index. Defaults to `index.max_result_window` which defaults to `10000`. Search requests take heap memory and time proportional to `max(window_size, from + size)` and this limits that memory. diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index 9889d112068..66919597d2c 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -20,16 +20,6 @@ replaying its operations take a considerable amount of time during recovery. It is also exposed through an API, though its rarely needed to be performed manually. -[float] -=== Flush settings - -The following <> settings -control how often the in-memory buffer is flushed to disk: - -`index.translog.flush_threshold_size`:: - -Once the translog hits this size, a flush will happen. Defaults to `512mb`. - [float] === Translog settings @@ -72,6 +62,26 @@ update, or bulk request. This setting accepts the following parameters: automatic commit will be discarded. -- +`index.translog.flush_threshold_size`:: + +The translog stores all operations that are not yet safely persisted in Lucene (i.e., are +not part of a lucene commit point). Although these operations are available for reads, they will +need to be reindexed if the shard was to shutdown and has to be recovered. This settings controls +the maximum total size of these operations, to prevent recoveries from taking too long. Once the +maximum size has been reached a flush will happen, generating a new Lucene commit. Defaults to `512mb`. + +`index.translog.retention.size`:: + +The total size of translog files to keep. 
Keeping more translog files increases the chance of performing +an operation based sync when recovering replicas. If the translog files are not sufficient, replica recovery +will fall back to a file based sync. Defaults to `512mb` + + +`index.translog.retention.age`:: + +The maximum duration for which translog files will be kept. Defaults to `12h`. + + [float] [[corrupt-translog-truncation]] === What to do if the translog becomes corrupted? diff --git a/docs/reference/index-shared1.asciidoc b/docs/reference/index-shared1.asciidoc new file mode 100644 index 00000000000..9325bd6e73e --- /dev/null +++ b/docs/reference/index-shared1.asciidoc @@ -0,0 +1,4 @@ + +include::getting-started.asciidoc[] + +include::setup.asciidoc[] diff --git a/docs/reference/index-shared.asciidoc b/docs/reference/index-shared2.asciidoc similarity index 62% rename from docs/reference/index-shared.asciidoc rename to docs/reference/index-shared2.asciidoc index 9f9ec1dc450..0a0e3aaf57d 100644 --- a/docs/reference/index-shared.asciidoc +++ b/docs/reference/index-shared2.asciidoc @@ -1,11 +1,4 @@ -include::../Versions.asciidoc[] - - -include::getting-started.asciidoc[] - -include::setup.asciidoc[] - include::migration/index.asciidoc[] include::api-conventions.asciidoc[] @@ -33,13 +26,3 @@ include::modules.asciidoc[] include::index-modules.asciidoc[] include::ingest.asciidoc[] - -include::how-to.asciidoc[] - -include::testing.asciidoc[] - -include::glossary.asciidoc[] - -include::release-notes.asciidoc[] - -include::redirects.asciidoc[] diff --git a/docs/reference/index-shared3.asciidoc b/docs/reference/index-shared3.asciidoc new file mode 100644 index 00000000000..cf685c15253 --- /dev/null +++ b/docs/reference/index-shared3.asciidoc @@ -0,0 +1,10 @@ + +include::how-to.asciidoc[] + +include::testing.asciidoc[] + +include::glossary.asciidoc[] + +include::release-notes.asciidoc[] + +include::redirects.asciidoc[] diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 992792455d1..f4eed6de092 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -1,4 +1,11 @@ [[elasticsearch-reference]] = Elasticsearch Reference -include::index-shared.asciidoc[] +:es-test-dir: {docdir}/../src/test +:plugins-examples-dir: {docdir}/../../plugins/examples +:docs-dir: {docdir}/../../../docs + +include::../Versions.asciidoc[] +include::index-shared1.asciidoc[] +include::index-shared2.asciidoc[] +include::index-shared3.asciidoc[] diff --git a/docs/reference/indices/delete-index.asciidoc b/docs/reference/indices/delete-index.asciidoc index bc057e155d0..1d12e0f88c5 100644 --- a/docs/reference/indices/delete-index.asciidoc +++ b/docs/reference/indices/delete-index.asciidoc @@ -10,10 +10,12 @@ DELETE /twitter // CONSOLE // TEST[setup:twitter] -The above example deletes an index called `twitter`. Specifying an index, -alias or wildcard expression is required. +The above example deletes an index called `twitter`. Specifying an index or a +wildcard expression is required. Aliases cannot be used to delete an index. +Wildcard expressions are resolved to matching concrete indices only. -The delete index API can also be applied to more than one index, by either using a comma separated list, or on all indices (be careful!) by using `_all` or `*` as index. +The delete index API can also be applied to more than one index, by either +using a comma separated list, or on all indices (be careful!) by using `_all` or `*` as index. 
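For example (the index names are illustrative only), a comma separated list and a wildcard expression can be combined in a single delete request:

[source,js]
--------------------------------------------------
DELETE /test1,test2,logs-2017-*
--------------------------------------------------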
In order to disable allowing to delete indices via wildcards or `_all`, set `action.destructive_requires_name` setting in the config to `true`. diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 084befc07db..7759dd76433 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -1186,8 +1186,7 @@ that is generally written for humans and not computer consumption. This processor comes packaged with over https://github.com/elastic/elasticsearch/tree/master/modules/ingest-common/src/main/resources/patterns[120 reusable patterns]. -If you need help building patterns to match your logs, you will find the and - applications quite useful! +If you need help building patterns to match your logs, you will find the {kibana-ref}/xpack-grokdebugger.html[Grok Debugger] tool quite useful! The Grok Debugger is an {xpack} feature under the Basic License and is therefore *free to use*. The Grok Constructor at is also a useful tool. [[grok-basics]] ==== Grok Basics @@ -1758,11 +1757,11 @@ caching see <>. | Name | Required | Default | Description | `lang` | no | "painless" | The scripting language | `id` | no | - | The stored script id to refer to -| `inline` | no | - | An inline script to be executed +| `source` | no | - | An inline script to be executed | `params` | no | - | Script Parameters |====== -One of `id` or `inline` options must be provided in order to properly reference a script to execute. +One of `id` or `source` options must be provided in order to properly reference a script to execute. You can access the current ingest document from within the script context by using the `ctx` variable. diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index 6e671fe7042..7cd2010726e 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -38,6 +38,8 @@ string:: <> and <> <>:: Accepts queries from the query-dsl +<>:: Defines parent/child relation for documents within the same index + [float] === Multi-fields @@ -84,7 +86,7 @@ include::types/token-count.asciidoc[] include::types/percolator.asciidoc[] - +include::types/parent-join.asciidoc[] diff --git a/docs/reference/mapping/types/parent-join.asciidoc b/docs/reference/mapping/types/parent-join.asciidoc new file mode 100644 index 00000000000..63bdea1dc4d --- /dev/null +++ b/docs/reference/mapping/types/parent-join.asciidoc @@ -0,0 +1,426 @@ +[[parent-join]] +=== `join` datatype + +The `join` datatype is a special field that creates +parent/child relation within documents of the same index. +The `relations` section defines a set of possible relations within the documents, +each relation being a parent name and a child name. +A parent/child relation can be defined as follows: + +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "doc": { + "properties": { + "my_join_field": { <1> + "type": "join", + "relations": { + "my_parent": "my_child" <2> + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> The name for the field +<2> Defines a single relation where `my_parent` is parent of `my_child`. + +To index a document with a join, the name of the relation and the optional parent +of the document must be provided in the `source`. 
+For instance the following creates two parent documents in the `my_parent` context: + +[source,js] +-------------------------------------------------- +PUT my_index/doc/1?refresh +{ + "text": "This is a parent document", + "my_join_field": "my_parent" <1> +} + +PUT my_index/doc/2?refresh +{ + "text": "This is a another parent document", + "my_join_field": "my_parent" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +<1> This document is a `my_parent` document. + +When indexing a child, the name of the relation as well as the parent id of the document +must be added in the `_source`. + +WARNING: It is required to index the lineage of a parent in the same shard so you must +always route child documents using their greater parent id. + + +For instance the following index two children documents pointing to the same parent `1 +with a `routing` value equals to the `id` of the parent: + +[source,js] +-------------------------------------------------- +PUT my_index/doc/3?routing=1&refresh <1> +{ + "text": "This is a child document", + "my_join_field": { + "name": "my_child", <2> + "parent": "1" <3> + } +} + +PUT my_index/doc/4?routing=1&refresh +{ + "text": "This is a another child document", + "my_join_field": { + "name": "my_child", + "parent": "1" + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +<1> This child document must be on the same shard than its parent +<2> `my_child` is the name of the join for this document +<3> The parent id of this child document + +==== Parent-join restrictions + +* Only one `join` field is allowed per index mapping. +* Parent and child documents must be indexed on the same shard. + This means that the same `routing` value needs to be provided when + <>, <>, or <> + a child document. +* An element can have multiple children but only one parent. +* It is possible to add a new relation to an existing `join` field. +* It is also possible to add a child to an existing element + but only if the element is already a parent. + +==== Searching with parent-join + +The parent-join creates one field to index the name of the relation +within the document (`my_parent`, `my_child`, ...). + +It also creates one field per parent/child relation. +The name of this field is the name of the `join` field followed by `#` and the +name of the parent in the relation. +So for instance for the `my_parent` => [`my_child`, `another_child`] relation, +the `join` field creates an additional field named `my_join_field#my_parent`. + +This field contains the parent `_id` that the document links to +if the document is a child (`my_child` or `another_child`) and the `_id` of +document if it's a parent (`my_parent`). 
+ +When searching an index that contains a `join` field, these two fields are always +returned in the search response: + +[source,js] +-------------------------- +GET my_index/_search +{ + "query": { + "match_all": {} + }, + "sort": ["_id"] +} +-------------------------- +// CONSOLE +// TEST[continued] + +Will return: + +[source,js] +-------------------------------------------------- +{ + ..., + "hits": { + "total": 4, + "max_score": null, + "hits": [ + { + "_index": "my_index", + "_type": "doc", + "_id": "1", + "_score": null, + "_source": { + "text": "This is a parent document", + "my_join_field": "my_parent" + }, + "fields": { + "my_join_field": [ + "my_parent" <1> + ] + }, + "sort": [ + "1" + ] + }, + { + "_index": "my_index", + "_type": "doc", + "_id": "2", + "_score": null, + "_source": { + "text": "This is a another parent document", + "my_join_field": "my_parent" + }, + "fields": { + "my_join_field": [ + "my_parent" <2> + ] + }, + "sort": [ + "2" + ] + }, + { + "_index": "my_index", + "_type": "doc", + "_id": "3", + "_score": null, + "_routing": "1", + "_source": { + "text": "This is a child document", + "my_join_field": { + "name": "my_child", <3> + "parent": "1" <4> + } + }, + "fields": { + "my_join_field": [ + "my_child" + ], + "my_join_field#my_parent": [ + "1" + ] + }, + "sort": [ + "3" + ] + }, + { + "_index": "my_index", + "_type": "doc", + "_id": "4", + "_score": null, + "_routing": "1", + "_source": { + "text": "This is a another child document", + "my_join_field": { + "name": "my_child", + "parent": "1" + } + }, + "fields": { + "my_join_field": [ + "my_child" + ], + "my_join_field#my_parent": [ + "1" + ] + }, + "sort": [ + "4" + ] + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"timed_out": false, "took": $body.took, "_shards": $body._shards/] + +<1> This document belongs to the `my_parent` join +<2> This document belongs to the `my_parent` join +<3> This document belongs to the `my_child` join +<4> The linked parent id for the child document + +==== Parent-join queries and aggregations + +See the <> and +<> queries, +the <> aggregation, +and <> for more information. + +The value of the `join` field is accessible in aggregations +and scripts, and may be queried with the +<>: + +[source,js] +-------------------------- +GET my_index/_search +{ + "query": { + "parent_id": { <1> + "type": "my_child", + "id": "1" + } + }, + "aggs": { + "parents": { + "terms": { + "field": "my_join_field#my_parent", <2> + "size": 10 + } + } + }, + "script_fields": { + "parent": { + "script": { + "source": "doc['my_join_field#my_parent']" <3> + } + } + } +} +-------------------------- +// CONSOLE +// TEST[continued] + +<1> Querying the `parent id` field (also see the <> and the <>) +<2> Aggregating on the `parent id` field (also see the <> aggregation) +<3> Accessing the parent id` field in scripts + + +==== Global ordinals + +The `join` field uses <> to speed up joins. +Global ordinals need to be rebuilt after any change to a shard. The more +parent id values are stored in a shard, the longer it takes to rebuild the +global ordinals for the `join` field. + +Global ordinals, by default, are built eagerly: if the index has changed, +global ordinals for the `join` field will be rebuilt as part of the refresh. +This can add significant time to the refresh. However most of the times this is the +right trade-off, otherwise global ordinals are rebuilt when the first parent-join +query or aggregation is used. 
This can introduce a significant latency spike for +your users, and it is usually worse because multiple global ordinals for the `join` +field may need to be rebuilt within a single refresh interval when many writes +are occurring. + +When the `join` field is used infrequently and writes occur frequently, it may +make sense to disable eager loading: + +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "doc": { + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "my_parent": "my_child" + }, + "eager_global_ordinals": false + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +The amount of heap used by global ordinals can be checked per parent relation +as follows: + +[source,sh] +-------------------------------------------------- +# Per-index +GET _stats/fielddata?human&fields=my_join_field#my_parent + +# Per-node per-index +GET _nodes/stats/indices/fielddata?human&fields=my_join_field#my_parent +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +==== Multiple levels of parent join + +It is also possible to define multiple children for a single parent: + +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "doc": { + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "my_parent": ["my_child", "another_child"] <1> + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> `my_parent` is parent of `my_child` and `another_child`. + +And multiple levels of parent/child: + +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "doc": { + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "my_parent": ["my_child", "another_child"], <1> + "another_child": "grand_child" <2> + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> `my_parent` is parent of `my_child` and `another_child` +<2> `another_child` is parent of `grand_child` + +The mapping above represents the following tree: + + my_parent + / \ + / \ + my_child another_child + | + | + grand_child + +Indexing a grandchild document requires a `routing` value equal +to the id of the grand-parent (the greater parent of the lineage): + + +[source,js] +-------------------------------------------------- +PUT my_index/doc/3?routing=1&refresh <1> +{ + "text": "This is a grand child document", + "my_join_field": { + "name": "grand_child", + "parent": "2" <2> + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +<1> This child document must be on the same shard as its grandparent and parent +<2> The parent id of this document (must point to an `another_child` document) + + diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc index 1a5121ae307..68b734f0c8d 100644 --- a/docs/reference/mapping/types/percolator.asciidoc +++ b/docs/reference/mapping/types/percolator.asciidoc @@ -85,9 +85,15 @@ fail. [float] ==== Limitations +[float] +===== Parent/child + Because the `percolate` query is processing one document at a time, it doesn't support queries and filters that run against child documents such as `has_child` and `has_parent`. +[float] +===== Fetching queries + There are a number of queries that fetch data via a get call during query parsing.
For example the `terms` query when using terms lookup, `template` query when using indexed scripts and `geo_shape` when using pre-indexed shapes. When these queries are indexed by the `percolator` field type then the get call is executed once. So each time the `percolator` @@ -95,3 +101,11 @@ query evaluates these queries, the fetches terms, shapes etc. as the were upon i is that fetching of terms that these queries do, happens both each time the percolator query gets indexed on both primary and replica shards, so the terms that are actually indexed can be different between shard copies, if the source index changed while indexing. + +[float] +===== Script query + +The script inside a `script` query can only access doc values fields. The `percolate` query indexes the provided document +into an in-memory index, which doesn't support stored fields. As a result, the `_source` field and +other stored fields are not stored, and that is why they +aren't available to the `script` query. diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index b14dc6e52fa..16110e6b21b 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -12,7 +12,6 @@ is a notable exception). If you need to index structured content such as email addresses, hostnames, status codes, or tags, it is likely that you should rather use a <> field. -codes, or tags, it is likely that you should rather use a <> field. Below is an example of a mapping for a text field: diff --git a/docs/reference/migration/migrate_6_0/indices.asciidoc b/docs/reference/migration/migrate_6_0/indices.asciidoc index 5ef42303a54..b52a403ca2a 100644 --- a/docs/reference/migration/migrate_6_0/indices.asciidoc +++ b/docs/reference/migration/migrate_6_0/indices.asciidoc @@ -56,3 +56,20 @@ will be marked for deletion. The index parameter in the update-aliases, put-alias, and delete-alias APIs no longer accepts alias names. Instead, it accepts only index names (or wildcards which will expand to matching indices). + +==== Delete index API resolves indices expressions only against indices + +The index parameter in the delete index API no longer accepts alias names. +Instead, it accepts only index names (or wildcards which will expand to +matching indices). + +==== Support for `+` has been removed in index expressions + +Omitting the `+` has the same effect as specifying it, hence support for `+` +has been removed in index expressions. + +==== Translog retention + +Translog files are now kept for up to 12 hours (by default), with a maximum size of `512mb` (default), and +are no longer deleted on `flush`. This increases the chance of performing an operation-based recovery when +bringing replicas up to speed. diff --git a/docs/reference/migration/migrate_6_0/java.asciidoc b/docs/reference/migration/migrate_6_0/java.asciidoc index 3bad61decb0..aed716c9c7f 100644 --- a/docs/reference/migration/migrate_6_0/java.asciidoc +++ b/docs/reference/migration/migrate_6_0/java.asciidoc @@ -41,3 +41,8 @@ Use `BucketOrder.key(boolean)` to order the `terms` aggregation buckets by `_ter In `BulkResponse`, `SearchResponse` and `TermVectorsResponse` `getTookInMiilis()` method has been removed in favor of `getTook` method. `getTookInMiilis()` is easily replaced by `getTook().getMillis()`.
+ +=== `GetField` and `SearchHitField` replaced by `DocumentField` + +As `GetField` and `SearchHitField` have the same members, they have been unified into +`DocumentField`. diff --git a/docs/reference/migration/migrate_6_0/mappings.asciidoc b/docs/reference/migration/migrate_6_0/mappings.asciidoc index 369ba3da162..e47c9562db0 100644 --- a/docs/reference/migration/migrate_6_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_6_0/mappings.asciidoc @@ -29,3 +29,14 @@ now disallowed for these indices' mappings. Previously Elasticsearch would silently ignore any dynamic templates that included a `match_mapping_type` type that was unrecognized. An exception is now thrown on an unrecognized type. + +==== Synonym Token Filter + +In 6.0, the Synonym Token Filter tokenizes synonyms with whatever +tokenizer and token filters appear before it in the chain. + +The `tokenizer` and `ignore_case` parameters are deprecated. +They are kept for backwards compatibility +for indices created before 6.0, +and Elasticsearch ignores them for new indices. + diff --git a/docs/reference/migration/migrate_6_0/packaging.asciidoc b/docs/reference/migration/migrate_6_0/packaging.asciidoc index fd0cd31d0af..6ddd84d3e2d 100644 --- a/docs/reference/migration/migrate_6_0/packaging.asciidoc +++ b/docs/reference/migration/migrate_6_0/packaging.asciidoc @@ -9,3 +9,29 @@ possible, the DEB and RPM packages now exclusively use the user and group `elasticsearch`. If a custom user or group is needed then a provisioning system should use the tarball distribution instead of the provided RPM and DEB packages. + +==== `path.conf` is no longer a configurable setting + +Previous versions of Elasticsearch enabled setting `path.conf` as a +setting. This was rather convoluted as it meant that you could start +Elasticsearch with a config file that specified via `path.conf` that +Elasticsearch should use another config file. Instead, `path.conf` is now a +command-line flag. To start Elasticsearch with a custom config file, use `-c +/path/to/config` or `--path.conf /path/to/config`. Here, `/path/to/config` is +the *directory* containing the config file. + +==== Default path settings are removed + +Previous versions of Elasticsearch enabled setting `default.path.data` and +`default.path.logs` to set the default data path and default logs path if they +were not otherwise set in the configuration file. These settings have been +removed and now data paths and log paths can be configured via settings +only. Related, this means that the environment variables `DATA_DIR` and +`LOG_DIR` no longer have any effect as these were used to set +`default.path.data` and `default.path.logs` in the packaging scripts. + +==== 32-bit is no longer maintained + +We previously attempted to ensure that Elasticsearch could be started on a 32-bit +JVM (although a bootstrap check prevented using a 32-bit JVM in production). We +are no longer maintaining this attempt. diff --git a/docs/reference/migration/migrate_6_0/plugins.asciidoc b/docs/reference/migration/migrate_6_0/plugins.asciidoc index 9f68f55472d..efb7328030e 100644 --- a/docs/reference/migration/migrate_6_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_6_0/plugins.asciidoc @@ -73,3 +73,20 @@ and `cloud.aws.ec2.region`. Instead, specify the full endpoint. Previous versions of Elasticsearch would skip hidden files and directories when scanning the plugins folder. This leniency has been removed.
+ +==== ICU Analysis plugin + +The icu4j library has been upgraded to 59.1. +Indices created in the previous major version will need to be reindexed +in order to return correct (and correctly ordered) results, +and to take advantage of new characters. + +==== Plugins should not construct `Environment` instances from `Settings` + +Previously, plugins could construct an `Environment` instance from `Settings` to +discover the path to plugin-specific config files. This will no longer work in +all situations as the `Settings` object does not carry the necessary information +for the config path to be set correctly. Instead, plugins that need to know the +config path should have a single constructor that accepts a pair of `Settings` +and `Path` instances, and construct an `Environment` using the corresponding +constructor on `Environment`. diff --git a/docs/reference/migration/migrate_6_0/search.asciidoc b/docs/reference/migration/migrate_6_0/search.asciidoc index 339af404871..0a6db5cacad 100644 --- a/docs/reference/migration/migrate_6_0/search.asciidoc +++ b/docs/reference/migration/migrate_6_0/search.asciidoc @@ -53,6 +53,9 @@ * The `template` query has been removed. This query was deprecated since 5.0 + +* The `percolate` query's `document_type` has been deprecated. From 6.0 onwards + it is no longer required to specify the `document_type` parameter. + ==== Search shards API The search shards API no longer accepts the `type` url parameter, which didn't diff --git a/docs/reference/migration/migrate_6_0/settings.asciidoc b/docs/reference/migration/migrate_6_0/settings.asciidoc index 5d3f0c44524..99ead4f1f44 100644 --- a/docs/reference/migration/migrate_6_0/settings.asciidoc +++ b/docs/reference/migration/migrate_6_0/settings.asciidoc @@ -75,4 +75,10 @@ deprecation warning. ==== Script Settings All of the existing scripting security settings have been removed. Instead -they are replaced with `script.allowed_types` and `script.allowed_contexts`. \ No newline at end of file +they are replaced with `script.allowed_types` and `script.allowed_contexts`. + +==== Discovery Settings + +The `discovery.type` setting no longer supports the values `gce`, `aws` and `ec2`. +Integration with these platforms should be done by setting the `discovery.zen.hosts_provider` setting to +one of those values. diff --git a/docs/reference/modules/cross-cluster-search.asciidoc b/docs/reference/modules/cross-cluster-search.asciidoc index f8a34019bfc..d6024a04845 100644 --- a/docs/reference/modules/cross-cluster-search.asciidoc +++ b/docs/reference/modules/cross-cluster-search.asciidoc @@ -107,7 +107,9 @@ separated by a `:` character: -------------------------------------------------- POST /cluster_one:twitter/tweet/_search { + "query": { "match_all": {} + } } -------------------------------------------------- // CONSOLE @@ -120,7 +122,9 @@ clusters: -------------------------------------------------- POST /cluster_one:twitter,twitter/tweet/_search { + "query": { "match_all": {} + } } -------------------------------------------------- // CONSOLE diff --git a/docs/reference/modules/indices/request_cache.asciidoc b/docs/reference/modules/indices/request_cache.asciidoc index e3896f718d9..fc04c5e9c63 100644 --- a/docs/reference/modules/indices/request_cache.asciidoc +++ b/docs/reference/modules/indices/request_cache.asciidoc @@ -103,7 +103,7 @@ IMPORTANT: If your query uses a script whose result is not deterministic (e.g.
it uses a random function or references the current time) you should set the `request_cache` flag to `false` to disable caching for that request. -Requests `size` is greater than 0 will not be cached even if the request cache is +Requests where `size` is greater than 0 will not be cached even if the request cache is enabled in the index settings. To cache these requests you will need to use the query-string parameter detailed here. diff --git a/docs/reference/modules/scripting/engine.asciidoc b/docs/reference/modules/scripting/engine.asciidoc index be103599152..37baa0801c9 100644 --- a/docs/reference/modules/scripting/engine.asciidoc +++ b/docs/reference/modules/scripting/engine.asciidoc @@ -17,7 +17,7 @@ the document frequency of a provided term. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{docdir}/../../plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java[expert_engine] +include-tagged::{plugins-examples-dir}/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java[expert_engine] -------------------------------------------------- You can execute the script by specifying its `lang` as `expert_scripts`, and the name diff --git a/docs/reference/modules/tribe.asciidoc b/docs/reference/modules/tribe.asciidoc index f014932db7a..8e2d75990c4 100644 --- a/docs/reference/modules/tribe.asciidoc +++ b/docs/reference/modules/tribe.asciidoc @@ -88,7 +88,6 @@ configuration options are passed down from the tribe node to each node client: * `transport.bind_host` * `transport.publish_host` * `path.home` -* `path.conf` * `path.logs` * `shield.*` diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index 296255d2ded..24511ad60fb 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -65,7 +65,6 @@ GET /my-index/_search "query" : { "percolate" : { "field" : "query", - "document_type" : "doc", "document" : { "message" : "A new bonsai tree in the office" } @@ -90,13 +89,13 @@ The above request will yield the following response: }, "hits": { "total": 1, - "max_score": 0.5716521, + "max_score": 0.5753642, "hits": [ { <1> "_index": "my-index", "_type": "doc", "_id": "1", - "_score": 0.5716521, + "_score": 0.5753642, "_source": { "query": { "match": { @@ -119,7 +118,7 @@ The above request will yield the following response: The following parameters are required when percolating a document: [horizontal] -`field`:: The field of type `percolator` and that holds the indexed queries. This is a required parameter. +`field`:: The field of type `percolator` that holds the indexed queries. This is a required parameter. `document_type`:: The type / mapping of the document being percolated. This is a required parameter. `document`:: The source of the document being percolated. @@ -190,7 +189,6 @@ GET /my-index/_search "query" : { "percolate" : { "field": "query", - "document_type" : "doc", "index" : "my-index", "type" : "doc", "id" : "2", @@ -202,7 +200,7 @@ GET /my-index/_search // CONSOLE // TEST[continued] -<1> The version is optional, but useful in certain cases. We can then ensure that we are try to percolate +<1> The version is optional, but useful in certain cases. We can ensure that we are trying to percolate the document we just have indexed. 
A change may be made after we have indexed, and if that is the case the then the search request would fail with a version conflict error. @@ -261,7 +259,6 @@ GET /my-index/_search "query" : { "percolate" : { "field": "query", - "document_type" : "doc", "document" : { "message" : "The quick brown fox jumps over the lazy dog" } @@ -291,13 +288,13 @@ This will yield the following response. }, "hits": { "total": 2, - "max_score": 0.5446649, + "max_score": 0.5753642, "hits": [ { "_index": "my-index", "_type": "doc", "_id": "4", - "_score": 0.5446649, + "_score": 0.5753642, "_source": { "query": { "match": { @@ -315,7 +312,7 @@ This will yield the following response. "_index": "my-index", "_type": "doc", "_id": "3", - "_score": 0.5446649, + "_score": 0.5753642, "_source": { "query": { "match": { @@ -344,12 +341,12 @@ the document defined in the `percolate` query. ==== How it Works Under the Hood When indexing a document into an index that has the <> mapping configured, the query -part of the documents gets parsed into a Lucene query and are stored into the Lucene index. A binary representation +part of the document gets parsed into a Lucene query and is stored into the Lucene index. A binary representation of the query gets stored, but also the query's terms are analyzed and stored into an indexed field. At search time, the document specified in the request gets parsed into a Lucene document and is stored in a in-memory temporary Lucene index. This in-memory index can just hold this one document and it is optimized for that. After this -a special query is build based on the terms in the in-memory index that select candidate percolator queries based on +a special query is built based on the terms in the in-memory index that select candidate percolator queries based on their indexed query terms. These queries are then evaluated by the in-memory index if they actually match. The selecting of candidate percolator queries matches is an important performance optimization during the execution diff --git a/docs/reference/search/explain.asciidoc b/docs/reference/search/explain.asciidoc index 61a8ac641c0..04ff66f6c37 100644 --- a/docs/reference/search/explain.asciidoc +++ b/docs/reference/search/explain.asciidoc @@ -35,11 +35,11 @@ This will yield the following result: "_id": "0", "matched": true, "explanation": { - "value": 1.55077, + "value": 1.6943599, "description": "weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:", "details": [ { - "value": 1.55077, + "value": 1.6943599, "description": "score(doc=0,freq=1.0 = termFreq=1.0\n), product of:", "details": [ { @@ -59,7 +59,7 @@ This will yield the following result: ] }, { - "value": 1.1186441, + "value": 1.2222223, "description": "tfNorm, computed as (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength)) from:", "details": [ { @@ -83,7 +83,7 @@ This will yield the following result: "details": [] }, { - "value": 4.0, + "value": 3.0, "description": "fieldLength", "details": [] } diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 2f3d395b21f..73cf54046b6 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -457,13 +457,13 @@ Response: ... 
"hits": { "total": 1, - "max_score": 1.4818809, + "max_score": 1.601195, "hits": [ { "_index": "twitter", "_type": "tweet", "_id": "1", - "_score": 1.4818809, + "_score": 1.601195, "_source": { "user": "test", "message": "some message with the number 1", @@ -513,13 +513,13 @@ Response: ... "hits": { "total": 1, - "max_score": 1.4818809, + "max_score": 1.601195, "hits": [ { "_index": "twitter", "_type": "tweet", "_id": "1", - "_score": 1.4818809, + "_score": 1.601195, "_source": { "user": "test", "message": "some message with the number 1", diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index 2ad06233d7d..72d13d2d116 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -1,7 +1,7 @@ [[search-request-inner-hits]] === Inner hits -The <> and <> features allow the return of documents that +The < and <> features allow the return of documents that have matches in a different scope. In the parent/child case, parent documents are returned based on matches in child documents or child documents are returned based on matches in parent documents. In the nested case, documents are returned based on matches in nested inner objects. @@ -103,11 +103,11 @@ PUT test/doc/1?refresh "comments": [ { "author": "kimchy", - "text": "comment text" + "number": 1 }, { "author": "nik9000", - "text": "words words words" + "number": 2 } ] } @@ -118,7 +118,7 @@ POST test/_search "nested": { "path": "comments", "query": { - "match": {"comments.text" : "words"} + "match": {"comments.number" : 2} }, "inner_hits": {} <1> } @@ -137,29 +137,29 @@ An example of a response snippet that could be generated from the above search r ..., "hits": { "total": 1, - "max_score": 0.9651416, + "max_score": 1.0, "hits": [ { "_index": "test", "_type": "doc", "_id": "1", - "_score": 0.9651416, + "_score": 1.0, "_source": ..., "inner_hits": { "comments": { <1> "hits": { "total": 1, - "max_score": 0.9651416, + "max_score": 1.0, "hits": [ { "_nested": { "field": "comments", "offset": 1 }, - "_score": 0.9651416, + "_score": 1.0, "_source": { "author": "nik9000", - "text": "words words words" + "number": 2 } } ] @@ -263,26 +263,26 @@ Response not included in text but tested for completeness sake. ..., "hits": { "total": 1, - "max_score": 0.9651416, + "max_score": 1.0444683, "hits": [ { "_index": "test", "_type": "doc", "_id": "1", - "_score": 0.9651416, + "_score": 1.0444683, "_source": ..., "inner_hits": { "comments": { <1> "hits": { "total": 1, - "max_score": 0.9651416, + "max_score": 1.0444683, "hits": [ { "_nested": { "field": "comments", "offset": 1 }, - "_score": 0.9651416, + "_score": 1.0444683, "fields": { "comments.text": [ "words words words" @@ -425,33 +425,39 @@ This indirect referencing is only supported for nested inner hits. 
[[parent-child-inner-hits]] ==== Parent/child inner hits -The parent/child `inner_hits` can be used to include parent or child +The parent/child `inner_hits` can be used to include parent or child: [source,js] -------------------------------------------------- PUT test { - "settings": { - "mapping.single_type": false - }, "mappings": { - "my_parent": {}, - "my_child": { - "_parent": { - "type": "my_parent" + "doc": { + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "my_parent": "my_child" + } + } } } } } -PUT test/my_parent/1?refresh +PUT test/doc/1?refresh { - "test": "test" + "number": 1, + "my_join_field": "my_parent" } -PUT test/my_child/1?parent=1&refresh +PUT test/doc/2?routing=1&refresh { - "test": "test" + "number": 1, + "my_join_field": { + "name": "my_child", + "parent": "1" + } } POST test/_search @@ -461,7 +467,7 @@ POST test/_search "type": "my_child", "query": { "match": { - "test": "test" + "number": 1 } }, "inner_hits": {} <1> @@ -478,40 +484,59 @@ An example of a response snippet that could be generated from the above search r [source,js] -------------------------------------------------- { - ..., - "hits": { - "total": 1, - "max_score": 1.0, - "hits": [ - { - "_index": "test", - "_type": "my_parent", - "_id": "1", - "_score": 1.0, - "_source": ..., - "inner_hits": { - "my_child": { - "hits": { - "total": 1, - "max_score": 0.18232156, - "hits": [ - { - "_type": "my_child", - "_id": "1", - "_score": 0.18232156, - "_routing": "1", - "_parent": "1", - "_source": { - "test": "test" - } + ..., + "hits": { + "total": 1, + "max_score": 1.0, + "hits": [ + { + "_index": "test", + "_type": "doc", + "_id": "1", + "_score": 1.0, + "_source": { + "number": 1, + "my_join_field": "my_parent" + }, + "fields": { + "my_join_field": [ + "my_parent" + ] + }, + "inner_hits": { + "my_child": { + "hits": { + "total": 1, + "max_score": 1.0, + "hits": [ + { + "_type": "doc", + "_id": "2", + "_score": 1.0, + "_routing": "1", + "_source": { + "number": 1, + "my_join_field": { + "name": "my_child", + "parent": "1" + } + }, + "fields": { + "my_join_field": [ + "my_child" + ], + "my_join_field#my_parent": [ + "1" + ] + } + } + ] + } + } } - ] } - } - } - } - ] - } + ] + } } -------------------------------------------------- // TESTRESPONSE[s/"_source": \.\.\./"_source": $body.hits.hits.0._source/] diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index e616b444dc2..7cc9d5ef805 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -1,5 +1,5 @@ [[setup]] -= Setup Elasticsearch += Set up Elasticsearch [partintro] -- @@ -34,10 +34,6 @@ refuse to start if a known-bad version of Java is used. The version of Java that Elasticsearch will use can be configured by setting the `JAVA_HOME` environment variable. -NOTE: Elasticsearch ships with default configuration for running Elasticsearch on 64-bit server JVMs. If you are using a 32-bit client JVM, -you must remove `-server` from <> and if you are using any 32-bit JVM you should reconfigure the thread stack size -from `-Xss1m` to `-Xss320k`. - -- include::setup/install.asciidoc[] diff --git a/docs/reference/setup/configuration.asciidoc b/docs/reference/setup/configuration.asciidoc index afaa211b592..44206564777 100644 --- a/docs/reference/setup/configuration.asciidoc +++ b/docs/reference/setup/configuration.asciidoc @@ -22,11 +22,11 @@ These files are located in the config directory, whose location defaults to location to `/etc/elasticsearch/`. 
The location of the config directory can be changed with the `path.conf` -setting, as follows: +flag, as follows: [source,sh] ------------------------------- -./bin/elasticsearch -Epath.conf=/path/to/my/config/ +./bin/elasticsearch --path.conf /path/to/my/config/ ------------------------------- [float] diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc index 9a8011c6133..f145728f8e3 100644 --- a/docs/reference/setup/install.asciidoc +++ b/docs/reference/setup/install.asciidoc @@ -7,9 +7,9 @@ Elasticsearch is provided in the following package formats: `zip`/`tar.gz`:: The `zip` and `tar.gz` packages are suitable for installation on any system -and are the easiest choice for getting started with Elasticsearch. +and are the easiest choice for getting started with Elasticsearch on most systems. + -<> or <> +<> or <> `deb`:: @@ -27,6 +27,14 @@ Elasticsearch website or from our RPM repository. + <> +`msi`:: + +The `msi` package is suitable for installation on Windows 64-bit systems with at least +.NET 4.5 framework installed, and is the easiest choice for getting started with +Elasticsearch on Windows. MSIs may be downloaded from the Elasticsearch website. ++ +<> + `docker`:: An image is available for running Elasticsearch as a Docker container. It ships with {xpack-ref}/index.html[X-Pack] pre-installed and may be downloaded from the Elastic Docker Registry. @@ -48,6 +56,8 @@ Ansible:: https://github.com/elastic/ansible-elasticsearch[ansible-elasticsearch include::install/zip-targz.asciidoc[] +include::install/zip-windows.asciidoc[] + include::install/deb.asciidoc[] include::install/rpm.asciidoc[] diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index ef3aa0d3847..c626262c162 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -10,7 +10,7 @@ The source code can be found on https://github.com/elastic/elasticsearch-docker/ NOTE: {xpack-ref}/index.html[X-Pack] is preinstalled in this image. Please take a few minutes to familiarize yourself with {xpack-ref}/security-getting-started.html[X-Pack Security] and how to change default passwords. The default password for the `elastic` user is `changeme`. -NOTE: X-Pack includes a trial license for 30 days. After that, you can obtain one of the https://www.elastic.co/subscriptions[available subscriptions] or {xpack-ref}/security-settings.html[disable Security]. The Basic license is free and includes the https://www.elastic.co/products/x-pack/monitoring[Monitoring] extension. +NOTE: X-Pack includes a trial license for 30 days. After that, you can obtain one of the https://www.elastic.co/subscriptions[available subscriptions] or {ref}/security-settings.html[disable Security]. The Basic license is free and includes the https://www.elastic.co/products/x-pack/monitoring[Monitoring] extension. Obtaining Elasticsearch for Docker is as simple as issuing a +docker pull+ command against the Elastic Docker registry. diff --git a/docs/reference/setup/install/sysconfig-file.asciidoc b/docs/reference/setup/install/sysconfig-file.asciidoc index 3070d08d578..c932493e115 100644 --- a/docs/reference/setup/install/sysconfig-file.asciidoc +++ b/docs/reference/setup/install/sysconfig-file.asciidoc @@ -21,14 +21,6 @@ about `max_map_count`. This is set via `sysctl` before starting elasticsearch. Defaults to `262144`. -`LOG_DIR`:: - - Log directory, defaults to `/var/log/elasticsearch`. 
- -`DATA_DIR`:: - - Data directory, defaults to `/var/lib/elasticsearch`. - `CONF_DIR`:: Configuration file directory (which needs to include `elasticsearch.yml` diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index d681ea2e69d..097178dd49b 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -1,9 +1,12 @@ [[windows]] -=== Install Elasticsearch on Windows +=== Install Elasticsearch with Windows MSI Installer -Elasticsearch can be installed on Windows using the `.zip` package. This -comes with a `elasticsearch-service.bat` command which will setup Elasticsearch to run as a -service. +Elasticsearch can be installed on Windows using the `.msi` package. This can +install Elasticsearch as a Windows service or allow it to be run manually using +the included `elasticsearch.exe` executable. + +TIP: Elasticsearch has historically been installed on Windows using the <> archive. +You can continue using the `.zip` approach if you prefer. The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. @@ -14,8 +17,8 @@ NOTE: Elasticsearch requires Java 8 or later. Use the http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] or an open-source distribution such as http://openjdk.java.net[OpenJDK]. -[[install-windows]] -==== Download and install the `.zip` package +[[download-msi]] +==== Download the `.msi` package ifeval::["{release-state}"=="unreleased"] @@ -25,33 +28,257 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -Download the `.zip` archive for Elasticsearch v{version} from: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.zip - -Unzip it with your favourite unzip tool. This will create a folder called -+elasticsearch-{version}+, which we will refer to as `%ES_HOME%`. In a terminal -window, `cd` to the `%ES_HOME%` directory, for instance: - -["source","sh",subs="attributes"] ----------------------------- -cd c:\elasticsearch-{version} ----------------------------- +Download the `.msi` package for Elasticsearch v{version} from https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.msi endif::[] -[[windows-running]] +[[install-msi-gui]] +==== Install using the graphical user interface (GUI) + +Double-click the downloaded `.msi` package to launch a GUI wizard that will guide you through the +installation process. You can view help on any step by clicking the `?` button, which reveals an +aside panel with additional information for each input: + +[[msi-installer-help]] +image::images/msi_installer/msi_installer_help.png[] + +Within the first screen, select the directory for the installation. In addition, select directories for where +data, logs and configuration will reside or <>: + +[[msi-installer-locations]] +image::images/msi_installer/msi_installer_locations.png[] + +Then select whether to install as a service or start Elasticsearch manually as needed. 
When +installing as a service, you can also decide which account to run the service under as well +as whether the service should be started after installation and when Windows is started or +restarted: + +[[msi-installer-service]] +image::images/msi_installer/msi_installer_service.png[] + +IMPORTANT: When selecting an account to run the service with, be sure that the chosen account +has sufficient privileges to access the installation and other deployment directories chosen. + +Common configuration settings are exposed within the Configuration section, allowing the cluster +name, node name and roles to be set, in addition to memory and network settings: + +[[msi-installer-configuration]] +image::images/msi_installer/msi_installer_configuration.png[] + +Finally, the installer provides a list of common plugins that can be downloaded and installed as +part of the installation: + +[[msi-installer-selected-plugins]] +image::images/msi_installer/msi_installer_selected_plugins.png[] + +By default, the {xpack-ref}/index.html[X-Pack] plugin will be selected to be installed, and if +installing with the <> node role, the {plugins}/ingest-attachment.html[Ingest Attachment Processor] and {plugins}/ingest-geoip.html[Ingest GeoIP Processor] plugins will also be selected for installation. + +NOTE: X-Pack includes a trial license for 30 days. After that, you can obtain one of the https://www.elastic.co/subscriptions[available subscriptions] or {ref}/security-settings.html[disable Security]. The Basic license is free and includes the https://www.elastic.co/products/x-pack/monitoring[Monitoring] extension. + +After clicking the install button, Elasticsearch will be installed: + +[[msi-installer-success]] +image::images/msi_installer/msi_installer_success.png[] + +[[install-msi-command-line]] +==== Install using the command line + +The `.msi` can also install Elasticsearch using the command line. The simplest installation +using the same defaults as the GUI is achieved by first navigating to the download directory, +then running: + +["source","sh",subs="attributes,callouts"] +-------------------------------------------- +msiexec.exe /i elasticsearch-{version}.msi /qn +-------------------------------------------- + +By default, msiexec does not wait for the installation process to complete, since it runs in the +Windows subsystem. To wait on the process to finish and ensure that `%ERRORLEVEL%` is set +accordingly, it is recommended to use `start /wait` to create a process and wait for it to exit + +["source","sh",subs="attributes,callouts"] +-------------------------------------------- +start /wait msiexec.exe /i elasticsearch-{version}.msi /qn +-------------------------------------------- + +As with any MSI installation package, a log file for the installation process can be found +within the `%TEMP%` directory, with a randomly generated name adhering to the format +`MSI*.LOG`. 
The path to a log file can be supplied using the `/l` command line argument + +["source","sh",subs="attributes,callouts"] +-------------------------------------------- +start /wait msiexec.exe /i elasticsearch-{version}.msi /qn /l install.log +-------------------------------------------- + +Supported Windows Installer command line arguments can be viewed using + +["source","sh",subs="attributes,callouts"] +-------------------------------------------- +msiexec.exe /help +-------------------------------------------- + +or by consulting the https://msdn.microsoft.com/en-us/library/windows/desktop/aa367988(v=vs.85).aspx[Windows Installer SDK Command-Line Options]. + +[[msi-command-line-options]] +==== Command line options + +All settings exposed within the GUI are also available as command line arguments (referred to +as _properties_ within Windows Installer documentation) that can be passed to msiexec: + +[horizontal] +`INSTALLDIR`:: + + The installation directory. Defaults to `%PROGRAMFILES%\Elastic\Elasticsearch` + +`DATADIRECTORY`:: + + The directory in which to store your data. +Defaults to `%ALLUSERSPROFILE%\Elastic\Elasticsearch\data` + +`CONFIGDIRECTORY`:: + + The directory in which to store your configuration. + Defaults to `%ALLUSERSPROFILE%\Elastic\Elasticsearch\config` + +`LOGSDIRECTORY`:: + + The directory in which to store your logs. + Defaults to `%ALLUSERSPROFILE%\Elastic\Elasticsearch\logs` + +`PLACEWRITABLELOCATIONSINSAMEPATH`:: + + Whether the data, configuration and logs directories + should be created under the installation directory. Defaults to `false` + +`INSTALLASSERVICE`:: + + Whether Elasticsearch is installed and configured as a Windows Service. + Defaults to `true` + +`STARTAFTERINSTALL`:: + + Whether the Windows Service is started after installation finishes. + Defaults to `true` + +`STARTWHENWINDOWSSTARTS`:: + + Whether the Windows Service is started when Windows is started. + Defaults to `true` + +`USELOCALSYSTEM`:: + + Whether the Windows service runs under the LocalSystem Account. + Defaults to `true` + +`USENETWORKSERVICE`:: + + Whether the Windows service runs under the NetworkService Account. Defaults + to `false` + +`USEEXISTINGUSER`:: + + Whether the Windows service runs under a specified existing account. Defaults + to `false` + +`USER`:: + + The username for the account under which the Windows service runs. Defaults to `""` + +`PASSWORD`:: + + The password for the account under which the Windows service runs. Defaults to `""` + +`CLUSTERNAME`:: + + The name of the cluster. Defaults to `elasticsearch` + +`NODENAME`:: + + The name of the node. Defaults to `%COMPUTERNAME%` + +`MASTERNODE`:: + + Whether Elasticsearch is configured as a master node. Defaults to `true` + +`DATANODE`:: + + Whether Elasticsearch is configured as a data node. Defaults to `true` + +`INGESTNODE`:: + + Whether Elasticsearch is configured as an ingest node. Defaults to `true` + +`SELECTEDMEMORY`:: + + The amount of memory to allocate to the JVM heap for Elasticsearch. + Defaults to half of the available memory on the target machine, up to a maximum of 30.5GB + +`LOCKMEMORY`:: + + Whether `bootstrap.memory_lock` should be used to try to lock the process + address space into RAM. Defaults to `true` + +`UNICASTNODES`:: + + A comma separated list of hosts in the form `host:port` or `host` to be used for + unicast discovery. Defaults to `""` + +`MINIMUMMASTERNODES`:: + + The minimum number of master-eligible nodes that must be visible + in order to form a cluster. 
Defaults to `""` + +`NETWORKHOST`:: + + The hostname or IP address to bind the node to and _publish_ (advertise) this + host to other nodes in the cluster. Defaults to `""` + +`HTTPPORT`:: + + The port to use for exposing Elasticsearch APIs over HTTP. Defaults to `9200` + +`TRANSPORTPORT`:: + + The port to use for internal communication between nodes within the cluster. + Defaults to `9300` + +`PLUGINS`:: + + A comma separated list of the plugins to download and install as part of the installation. Defaults to + `x-pack, ingest-attachment, ingest-geoip` + +To pass a value, simply append the property name and value using the format `=""` to +the installation command. For example, to use a different installation directory to the default one: + +["source","sh",subs="attributes,callouts"] +-------------------------------------------- +start /wait msiexec.exe /i elasticsearch-{version}.msi /qn INSTALLDIR="C:\Custom Install Directory" +-------------------------------------------- + +Consult the https://msdn.microsoft.com/en-us/library/windows/desktop/aa367988(v=vs.85).aspx[Windows Installer SDK Command-Line Options] +for additional rules related to values containing quotation marks. + +[[msi-installer-command-line-running]] ==== Running Elasticsearch from the command line -Elasticsearch can be started from the command line as follows: +Once installed, Elasticsearch can be started from the command line, if not installed as a service +and configured to start when installation completes, as follows: -[source,sh] +["source","sh",subs="attributes,callouts"] -------------------------------------------- -.\bin\elasticsearch.bat +.\bin\elasticsearch.exe -------------------------------------------- -By default, Elasticsearch runs in the foreground, prints its logs to `STDOUT`, -and can be stopped by pressing `Ctrl-C`. +The command line terminal will display output similar to the following: -[[windows-configuring]] +[[msi-installer-elasticsearch-exe]] +image::images/msi_installer/elasticsearch_exe.png[] + +By default, Elasticsearch runs in the foreground, prints its logs to `STDOUT` in addition +to the `.log` file within `LOGSDIRECTORY`, and can be stopped by pressing `Ctrl-C`. + +[[msi-installer-command-line-configuration]] ==== Configuring Elasticsearch on the command line Elasticsearch loads its configuration from the `%ES_HOME%\config\elasticsearch.yml` @@ -61,12 +288,12 @@ file by default. The format of this config file is explained in Any settings that can be specified in the config file can also be specified on the command line, using the `-E` syntax as follows: -[source,sh] +["source","sh",subs="attributes,callouts"] -------------------------------------------- -.\bin\elasticsearch.bat -Ecluster.name=my_cluster -Enode.name=node_1 +.\bin\elasticsearch.exe -E cluster.name=my_cluster -E node.name=node_1 -------------------------------------------- -NOTE: Values that contain spaces must be surrounded with quotes. For instance `-Epath.logs="C:\My Logs\logs"`. +NOTE: Values that contain spaces must be surrounded with quotes. For instance `-E path.logs="C:\My Logs\logs"`. TIP: Typically, any cluster-wide settings (like `cluster.name`) should be added to the `elasticsearch.yml` config file, while any node-specific settings @@ -74,187 +301,124 @@ such as `node.name` could be specified on the command line. 
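For instance, combining the quoting rule above with ordinary settings, a node name and a log path containing spaces (the values here are only illustrative) might be passed together as:

[source,sh]
--------------------------------------------
.\bin\elasticsearch.exe -E cluster.name=my_cluster -E node.name=node_1 -E path.logs="C:\My Logs\logs"
--------------------------------------------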
include::check-running.asciidoc[] -[[windows-service]] +[[msi-installer-windows-service]] ==== Installing Elasticsearch as a Service on Windows Elasticsearch can be installed as a service to run in the background or start -automatically at boot time without any user interaction. This can be achieved -through the `elasticsearch-service.bat` script in the `bin\` folder which allows one to -install, remove, manage or configure the service and potentially start and -stop the service, all from the command-line. +automatically at boot time without any user interaction. This can be achieved upon installation +using the following command line options: + +* `INSTALLASSERVICE=true` +* `STARTAFTERINSTALL=true` +* `STARTWHENWINDOWSSTARTS=true` + +Once installed, Elasticsearch will appear within the Services control panel: + +[[msi-installer-installed-service]] +image::images/msi_installer/msi_installer_installed_service.png[] + +and can be stopped and restarted from within the control panel, or from the command line. + +With Command Prompt: + +[source,sh] +-------------------------------------------- +sc.exe stop Elasticsearch +sc.exe start Elasticsearch +-------------------------------------------- + +With PowerShell: + +[source,powershell] +-------------------------------------------- +Get-Service Elasticsearch | Stop-Service -PassThru | Start-Service +-------------------------------------------- + +Changes can be made to the `jvm.options` and `elasticsearch.yml` configuration files to configure the +service after installation. Most changes (like JVM settings) will require a restart of the +service in order to take effect. + +[[upgrade-msi-gui]] +==== Upgrade using the graphical user interface (GUI) + +The `.msi` package supports upgrading an installed version of Elasticsearch to a newer +version of Elasticsearch. The upgrade process handles upgrading all installed plugins as +well as retaining both your data and configuration. + +Downloading and clicking on a newer version of the `.msi` package will launch the GUI wizard. +The first step will list the read-only properties from the previous installation: + +[[msi-installer-upgrade-notice]] +image::images/msi_installer/msi_installer_upgrade_notice.png[] + +The following configuration step allows certain configuration options to be changed: + +[[msi-installer-upgrade-configuration]] +image::images/msi_installer/msi_installer_upgrade_configuration.png[] + +Finally, the plugins step allows currently installed plugins to be upgraded or removed, and +for plugins not currently installed, to be downloaded and installed: + +[[msi-installer-upgrade-plugins]] +image::images/msi_installer/msi_installer_upgrade_plugins.png[] + +[[upgrade-msi-command-line]] +==== Upgrade using the command line + +The `.msi` can also upgrade Elasticsearch using the command line.
The simplest upgrade +using the same defaults as the currently installed version is achieved by first +navigating to the download directory, then running: ["source","sh",subs="attributes,callouts"] --------------------------------------------------- -c:\elasticsearch-{version}{backslash}bin>elasticsearch-service +-------------------------------------------- +start /wait msiexec.exe /i elasticsearch-{version}.msi /qn +-------------------------------------------- -Usage: elasticsearch-service.bat install|remove|start|stop|manager [SERVICE_ID] --------------------------------------------------- +Similar to the install process, a path to a log file for the upgrade process can +be passed using the `/l` command line argument -The script requires one parameter (the command to execute) followed by an -optional one indicating the service id (useful when installing multiple -Elasticsearch services). +["source","sh",subs="attributes,callouts"] +-------------------------------------------- +start /wait msiexec.exe /i elasticsearch-{version}.msi /qn /l upgrade.log +-------------------------------------------- -The commands available are: +[[uninstall-msi-gui]] +==== Uninstall using Add/Remove Programs -[horizontal] -`install`:: Install Elasticsearch as a service +The `.msi` package handles uninstallation of all directories and files added as part of installation. -`remove`:: Remove the installed Elasticsearch service (and stop the service if started) +WARNING: Uninstallation will remove all directories and their contents created as part of +installation, including data within the data directory. If you wish to retain your data upon +uninstallation, it is recommended that you make a copy of the data directory before uninstallation. -`start`:: Start the Elasticsearch service (if installed) +MSI installer packages do not provide a GUI for uninstallation. An installed program can be uninstalled +by pressing the Windows key and typing `add or remove programs` to open the system settings. -`stop`:: Stop the Elasticsearch service (if started) +Once opened, find the Elasticsearch installation within the list of installed applications, click +and choose `Uninstall`: -`manager`:: Start a GUI for managing the installed service +[[msi-installer-uninstall]] +image::images/msi_installer/msi_installer_uninstall.png[] -Based on the architecture of the available JDK/JRE (set through `JAVA_HOME`), -the appropriate 64-bit(x64) or 32-bit(x86) service will be installed. This -information is made available during install: +This will launch the uninstallation process. -["source","sh",subs="attributes"] --------------------------------------------------- -c:\elasticsearch-{version}{backslash}bin>elasticsearch-service install -Installing service : "elasticsearch-service-x64" -Using JAVA_HOME (64-bit): "c:\jvm\jdk1.8" -The service 'elasticsearch-service-x64' has been installed. --------------------------------------------------- +[[uninstall-msi-command-line]] +==== Uninstall using the command line -NOTE: While a JRE can be used for the Elasticsearch service, due to its use of a client VM (as opposed to a server JVM which offers better performance for long-running applications) its usage is discouraged and a warning will be issued. +Uninstallation can also be performed from the command line by navigating to the directory +containing the `.msi` package and running: -NOTE: The system environment variable `JAVA_HOME` should be set to the path to -the JDK installation that you want the service to use. 
If you upgrade the JDK, -you are not required to the reinstall the service but you must set the value of -the system environment variable `JAVA_HOME` to the path to the new JDK -installation. However, upgrading across JVM types (e.g. JRE versus SE) is not -supported, and does require the service to be reinstalled. +["source","sh",subs="attributes,callouts"] +-------------------------------------------- +start /wait msiexec.exe /x elasticsearch-{version}.msi /qn +-------------------------------------------- -[[windows-service-settings]] -[float] -=== Customizing service settings +Similar to the install process, a path to a log file for the uninstallation process can +be passed using the `/l` command line argument -The Elasticsearch service can be configured prior to installation by setting the following environment variables (either using the https://technet.microsoft.com/en-us/library/cc754250(v=ws.10).aspx[set command] from the command line, or through the `System Properties->Environment Variables` GUI). +["source","sh",subs="attributes,callouts"] +-------------------------------------------- +start /wait msiexec.exe /x elasticsearch-{version}.msi /qn /l uninstall.log +-------------------------------------------- -[horizontal] -`SERVICE_ID`:: - - A unique identifier for the service. Useful if installing multiple instances on the same machine. Defaults to `elasticsearch-service-x86` (on 32-bit Windows) or `elasticsearch-service-x64` (on 64-bit Windows). - -`SERVICE_USERNAME`:: - - The user to run as, defaults to the local system account. - -`SERVICE_PASSWORD`:: - - The password for the user specified in `%SERVICE_USERNAME%`. - -`SERVICE_DISPLAY_NAME`:: - - The name of the service. Defaults to `Elasticsearch %SERVICE_ID%`. - -`SERVICE_DESCRIPTION`:: - - The description of the service. Defaults to `Elasticsearch Windows Service - https://elastic.co`. - -`JAVA_HOME`:: - - The installation directory of the desired JVM to run the service under. - -`LOG_DIR`:: - - Log directory, defaults to `%ES_HOME%\logs`. - -`DATA_DIR`:: - - Data directory, defaults to `%ES_HOME%\data`. - -`CONF_DIR`:: - - Configuration file directory (which needs to include `elasticsearch.yml` - and `log4j2.properties` files), defaults to `%ES_HOME%\conf`. - -`ES_JAVA_OPTS`:: - - Any additional JVM system properties you may want to apply. - -`ES_START_TYPE`:: - - Startup mode for the service. Can be either `auto` or `manual` (default). - -`ES_STOP_TIMEOUT` :: - - The timeout in seconds that procrun waits for service to exit gracefully. Defaults to `0`. - -NOTE: At its core, `elasticsearch-service.bat` relies on http://commons.apache.org/proper/commons-daemon/[Apache Commons Daemon] project -to install the service. Environment variables set prior to the service installation are copied and will be used during the service lifecycle. This means any changes made to them after the installation will not be picked up unless the service is reinstalled. - -NOTE: On Windows, the <> can be configured as for -any other Elasticsearch installation when running Elasticsearch from the -command line, or when installing Elasticsearch as a service for the -first time. To adjust the heap size for an already installed service, -use the service manager: `bin\elasticsearch-service.bat manager`. 
- -Using the Manager GUI:: - -It is also possible to configure the service after it's been installed using the manager GUI (`elasticsearch-service-mgr.exe`), which offers insight into the installed service, including its status, startup type, JVM, start and stop settings amongst other things. Simply invoking `elasticsearch-service.bat manager` from the command-line will open up the manager window: - -image::images/service-manager-win.png["Windows Service Manager GUI",align="center"] - -Most changes (like JVM settings) made through the manager GUI will require a restart of the service in order to take affect. - -[[windows-layout]] -==== Directory layout of `.zip` archive - -The `.zip` package is entirely self-contained. All files and directories are, -by default, contained within `%ES_HOME%` -- the directory created when -unpacking the archive. - -This is very convenient because you don't have to create any directories to -start using Elasticsearch, and uninstalling Elasticsearch is as easy as -removing the `%ES_HOME%` directory. However, it is advisable to change the -default locations of the config directory, the data directory, and the logs -directory so that you do not delete important data later on. - - -[cols="> is available that provides the easiest getting started +experience for Windows. You can continue using the `.zip` approach if you prefer. + +The latest stable version of Elasticsearch can be found on the +link:/downloads/elasticsearch[Download Elasticsearch] page. +Other versions can be found on the +link:/downloads/past-releases[Past Releases page]. + +NOTE: Elasticsearch requires Java 8 or later. Use the +http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] +or an open-source distribution such as http://openjdk.java.net[OpenJDK]. + +[[install-windows]] +==== Download and install the `.zip` package + +ifeval::["{release-state}"=="unreleased"] + +Version {version} of Elasticsearch has not yet been released. + +endif::[] + +ifeval::["{release-state}"!="unreleased"] + +Download the `.zip` archive for Elasticsearch v{version} from: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.zip + +Unzip it with your favourite unzip tool. This will create a folder called ++elasticsearch-{version}+, which we will refer to as `%ES_HOME%`. In a terminal +window, `cd` to the `%ES_HOME%` directory, for instance: + +["source","sh",subs="attributes"] +---------------------------- +cd c:\elasticsearch-{version} +---------------------------- + +endif::[] + +[[windows-running]] +==== Running Elasticsearch from the command line + +Elasticsearch can be started from the command line as follows: + +[source,sh] +-------------------------------------------- +.\bin\elasticsearch.bat +-------------------------------------------- + +By default, Elasticsearch runs in the foreground, prints its logs to `STDOUT`, +and can be stopped by pressing `Ctrl-C`. + +[[windows-configuring]] +==== Configuring Elasticsearch on the command line + +Elasticsearch loads its configuration from the `%ES_HOME%\config\elasticsearch.yml` +file by default. The format of this config file is explained in +<>. 
+ +Any settings that can be specified in the config file can also be specified on +the command line, using the `-E` syntax as follows: + +[source,sh] +-------------------------------------------- +.\bin\elasticsearch.bat -Ecluster.name=my_cluster -Enode.name=node_1 +-------------------------------------------- + +NOTE: Values that contain spaces must be surrounded with quotes. For instance `-Epath.logs="C:\My Logs\logs"`. + +TIP: Typically, any cluster-wide settings (like `cluster.name`) should be +added to the `elasticsearch.yml` config file, while any node-specific settings +such as `node.name` could be specified on the command line. + +include::check-running.asciidoc[] + +[[windows-service]] +==== Installing Elasticsearch as a Service on Windows + +Elasticsearch can be installed as a service to run in the background or start +automatically at boot time without any user interaction. This can be achieved +through the `elasticsearch-service.bat` script in the `bin\` folder which allows one to +install, remove, manage or configure the service and potentially start and +stop the service, all from the command-line. + +["source","sh",subs="attributes,callouts"] +-------------------------------------------------- +c:\elasticsearch-{version}{backslash}bin>elasticsearch-service.bat + +Usage: elasticsearch-service.bat install|remove|start|stop|manager [SERVICE_ID] +-------------------------------------------------- + +The script requires one parameter (the command to execute) followed by an +optional one indicating the service id (useful when installing multiple +Elasticsearch services). + +The commands available are: + +[horizontal] +`install`:: Install Elasticsearch as a service + +`remove`:: Remove the installed Elasticsearch service (and stop the service if started) + +`start`:: Start the Elasticsearch service (if installed) + +`stop`:: Stop the Elasticsearch service (if started) + +`manager`:: Start a GUI for managing the installed service + +The name of the service and the value of `JAVA_HOME` will be made available during install: + +["source","sh",subs="attributes"] +-------------------------------------------------- +c:\elasticsearch-{version}{backslash}bin>elasticsearch-service.bat install +Installing service : "elasticsearch-service-x64" +Using JAVA_HOME (64-bit): "c:\jvm\jdk1.8" +The service 'elasticsearch-service-x64' has been installed. +-------------------------------------------------- + +NOTE: While a JRE can be used for the Elasticsearch service, due to its use of a client VM (as opposed to a server JVM which offers better performance for long-running applications) its usage is discouraged and a warning will be issued. + +NOTE: The system environment variable `JAVA_HOME` should be set to the path to +the JDK installation that you want the service to use. If you upgrade the JDK, +you are not required to reinstall the service but you must set the value of +the system environment variable `JAVA_HOME` to the path to the new JDK +installation. However, upgrading across JVM types (e.g. JRE versus SE) is not +supported, and does require the service to be reinstalled. + +[[windows-service-settings]] +[float] +=== Customizing service settings + +The Elasticsearch service can be configured prior to installation by setting the following environment variables (either using the https://technet.microsoft.com/en-us/library/cc754250(v=ws.10).aspx[set command] from the command line, or through the `System Properties->Environment Variables` GUI).
+ +[horizontal] +`SERVICE_ID`:: + + A unique identifier for the service. Useful if installing multiple instances + on the same machine. Defaults to `elasticsearch-service-x64`. + +`SERVICE_USERNAME`:: + + The user to run as, defaults to the local system account. + +`SERVICE_PASSWORD`:: + + The password for the user specified in `%SERVICE_USERNAME%`. + +`SERVICE_DISPLAY_NAME`:: + + The name of the service. Defaults to `Elasticsearch %SERVICE_ID%`. + +`SERVICE_DESCRIPTION`:: + + The description of the service. Defaults to `Elasticsearch Windows Service - https://elastic.co`. + +`JAVA_HOME`:: + + The installation directory of the desired JVM to run the service under. + +`SERVICE_LOG_DIR`:: + + Service log directory, defaults to `%ES_HOME%\logs`. Note that this does + not control the path for the Elasticsearch logs; the path for these is set + via the setting `path.logs` in the `elasticsearch.yml` configuration file, + or on the command line. + +`CONF_DIR`:: + + Configuration file directory (which needs to include `elasticsearch.yml` + and `log4j2.properties` files), defaults to `%ES_HOME%\conf`. + +`ES_JAVA_OPTS`:: + + Any additional JVM system properties you may want to apply. + +`ES_START_TYPE`:: + + Startup mode for the service. Can be either `auto` or `manual` (default). + +`ES_STOP_TIMEOUT`:: + + The timeout in seconds that procrun waits for the service to exit gracefully. Defaults to `0`. + +NOTE: At its core, `elasticsearch-service.bat` relies on the http://commons.apache.org/proper/commons-daemon/[Apache Commons Daemon] project +to install the service. Environment variables set prior to the service installation are copied and will be used during the service lifecycle. This means any changes made to them after the installation will not be picked up unless the service is reinstalled. + +NOTE: On Windows, the <<heap-size,heap size>> can be configured as for +any other Elasticsearch installation when running Elasticsearch from the +command line, or when installing Elasticsearch as a service for the +first time. To adjust the heap size for an already installed service, +use the service manager: `bin\elasticsearch-service.bat manager`. + +Using the Manager GUI:: + +It is also possible to configure the service after it's been installed using the manager GUI (`elasticsearch-service-mgr.exe`), which offers insight into the installed service, including its status, startup type, JVM, start and stop settings, amongst other things. Simply invoking `elasticsearch-service.bat manager` from the command-line will open up the manager window: + +image::images/service-manager-win.png["Windows Service Manager GUI",align="center"] + +Most changes (like JVM settings) made through the manager GUI will require a restart of the service in order to take effect. + +[[windows-layout]] +==== Directory layout of `.zip` archive + +The `.zip` package is entirely self-contained. All files and directories are, +by default, contained within `%ES_HOME%` -- the directory created when +unpacking the archive. + +This is very convenient because you don't have to create any directories to +start using Elasticsearch, and uninstalling Elasticsearch is as easy as +removing the `%ES_HOME%` directory. However, it is advisable to change the +default locations of the config directory, the data directory, and the logs +directory so that you do not delete important data later on.
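As a brief illustration of that advice (a minimal sketch; the `D:` locations are examples, not defaults), the data and log paths can be moved outside `%ES_HOME%` when starting from the command line:

[source,sh]
--------------------------------------------------
rem Keep data and logs outside %ES_HOME% so removing that folder cannot delete them
.\bin\elasticsearch.bat -Epath.data="D:\elastic\data" -Epath.logs="D:\elastic\logs"
--------------------------------------------------

When running as a service, the same `path.*` settings belong in `elasticsearch.yml`; the `SERVICE_LOG_DIR` variable listed above affects only the service's own logs, not the Elasticsearch logs.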
+ + +[cols=" getNamedXContentParsers() { + ParseField parseField = new ParseField(MatrixStatsAggregationBuilder.NAME); + ContextParser contextParser = (p, name) -> ParsedMatrixStats.fromXContent(p, (String) name); + return singletonList(new NamedXContentRegistry.Entry(Aggregation.class, parseField, contextParser)); + } +} diff --git a/modules/aggs-matrix-stats/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider b/modules/aggs-matrix-stats/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider new file mode 100644 index 00000000000..a2d706a39a6 --- /dev/null +++ b/modules/aggs-matrix-stats/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider @@ -0,0 +1 @@ +org.elasticsearch.search.aggregations.matrix.spi.MatrixStatsNamedXContentProvider \ No newline at end of file diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java index a6c6ed834d8..497f3acb7b3 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java +++ b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; +import org.elasticsearch.search.aggregations.matrix.stats.InternalMatrixStats.Fields; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; @@ -38,6 +39,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.function.Predicate; public class InternalMatrixStatsTests extends InternalAggregationTestCase { @@ -170,4 +172,9 @@ public class InternalMatrixStatsTests extends InternalAggregationTestCase matrix.getCorrelation(other, unknownField)); } } + + @Override + protected Predicate excludePathsFromXContentInsertion() { + return path -> path.endsWith(Fields.CORRELATION) || path.endsWith(Fields.COVARIANCE); + } } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 2f8f1d7405a..18e34d381a1 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -32,6 +32,7 @@ import org.apache.lucene.analysis.cjk.CJKWidthFilter; import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter; import org.apache.lucene.analysis.commongrams.CommonGramsFilter; import org.apache.lucene.analysis.core.DecimalDigitFilter; +import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.core.LowerCaseTokenizer; import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.core.UpperCaseFilter; @@ -52,7 +53,6 @@ import org.apache.lucene.analysis.miscellaneous.ScandinavianFoldingFilter; import 
org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter; import org.apache.lucene.analysis.miscellaneous.TrimFilter; import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter; -import org.apache.lucene.analysis.miscellaneous.UniqueTokenFilter; import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter; import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter; import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; @@ -98,6 +98,23 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { filters.put("trim", TrimTokenFilterFactory::new); filters.put("word_delimiter", WordDelimiterTokenFilterFactory::new); filters.put("word_delimiter_graph", WordDelimiterGraphTokenFilterFactory::new); + filters.put("unique", UniqueTokenFilterFactory::new); + filters.put("flatten_graph", FlattenGraphTokenFilterFactory::new); + filters.put("length", LengthTokenFilterFactory::new); + filters.put("lowercase", LowerCaseTokenFilterFactory::new); + filters.put("uppercase", UpperCaseTokenFilterFactory::new); + filters.put("nGram", NGramTokenFilterFactory::new); + filters.put("ngram", NGramTokenFilterFactory::new); + filters.put("edgeNGram", EdgeNGramTokenFilterFactory::new); + filters.put("edge_ngram", EdgeNGramTokenFilterFactory::new); + filters.put("stemmer", StemmerTokenFilterFactory::new); + filters.put("stemmer_override", requriesAnalysisSettings(StemmerOverrideTokenFilterFactory::new)); + filters.put("kstem", KStemTokenFilterFactory::new); + filters.put("dictionary_decompounder", requriesAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new)); + filters.put("hyphenation_decompounder", requriesAnalysisSettings(HyphenationCompoundWordTokenFilterFactory::new)); + filters.put("reverse", ReverseTokenFilterFactory::new); + filters.put("elision", ElisionTokenFilterFactory::new); + filters.put("truncate", requriesAnalysisSettings(TruncateTokenFilterFactory::new)); return filters; } @@ -113,8 +130,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { @Override public Map> getTokenizers() { Map> tokenizers = new TreeMap<>(); - tokenizers.put("simplepattern", SimplePatternTokenizerFactory::new); - tokenizers.put("simplepatternsplit", SimplePatternSplitTokenizerFactory::new); + tokenizers.put("simple_pattern", SimplePatternTokenizerFactory::new); + tokenizers.put("simple_pattern_split", SimplePatternSplitTokenizerFactory::new); return tokenizers; } @@ -172,7 +189,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { filters.add(PreConfiguredTokenFilter.singleton("nGram", false, NGramTokenFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new)); - filters.add(PreConfiguredTokenFilter.singleton("reverse", false, input -> new ReverseStringFilter(input))); + filters.add(PreConfiguredTokenFilter.singleton("reverse", false, ReverseStringFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("russian_stem", false, input -> new SnowballFilter(input, "Russian"))); filters.add(PreConfiguredTokenFilter.singleton("scandinavian_folding", true, ScandinavianFoldingFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("scandinavian_normalization", true, ScandinavianNormalizationFilter::new)); @@ -185,7 +202,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { 
filters.add(PreConfiguredTokenFilter.singleton("trim", false, TrimFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("truncate", false, input -> new TruncateTokenFilter(input, 10))); filters.add(PreConfiguredTokenFilter.singleton("type_as_payload", false, TypeAsPayloadTokenFilter::new)); - filters.add(PreConfiguredTokenFilter.singleton("unique", false, input -> new UniqueTokenFilter(input))); + filters.add(PreConfiguredTokenFilter.singleton("unique", false, UniqueTokenFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("uppercase", true, UpperCaseFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("word_delimiter", false, input -> new WordDelimiterFilter(input, @@ -207,6 +224,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { @Override public List getPreConfiguredTokenizers() { List tokenizers = new ArrayList<>(); + tokenizers.add(PreConfiguredTokenizer.singleton("keyword", KeywordTokenizer::new, null)); tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", LowerCaseTokenizer::new, () -> new TokenFilterFactory() { @Override public String name() { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/DictionaryCompoundWordTokenFilterFactory.java similarity index 90% rename from core/src/main/java/org/elasticsearch/index/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/DictionaryCompoundWordTokenFilterFactory.java index fc9719d36b1..e9e690e0b01 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/DictionaryCompoundWordTokenFilterFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.analysis.compound; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.compound.DictionaryCompoundWordTokenFilter; @@ -33,7 +33,7 @@ import org.elasticsearch.index.IndexSettings; */ public class DictionaryCompoundWordTokenFilterFactory extends AbstractCompoundWordTokenFilterFactory { - public DictionaryCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { + DictionaryCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, env, name, settings); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenFilterFactory.java similarity index 92% rename from core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenFilterFactory.java index 1d3b8e296ec..af6d30a0354 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenFilterFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; @@ -26,6 +26,7 @@ import org.apache.lucene.analysis.reverse.ReverseStringFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory { @@ -38,13 +39,13 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory { public static final int SIDE_BACK = 2; private final int side; - public EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE); this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); this.side = parseSide(settings.get("side", "front")); } - + static int parseSide(String side) { switch(side) { case "front": return SIDE_FRONT; @@ -56,19 +57,19 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory { @Override public TokenStream create(TokenStream tokenStream) { TokenStream result = tokenStream; - + // side=BACK is not supported anymore but applying ReverseStringFilter up-front and after the token filter has the same effect if (side == SIDE_BACK) { result = new ReverseStringFilter(result); } - + result = new EdgeNGramTokenFilter(result, minGram, maxGram); - + // side=BACK is not supported anymore but applying ReverseStringFilter up-front and after the token filter has the same effect if (side == SIDE_BACK) { result = new ReverseStringFilter(result); } - + return result; } @@ -76,4 +77,4 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory { public boolean breaksFastVectorHighlighter() { return true; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ElisionTokenFilterFactory.java similarity index 82% rename from core/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ElisionTokenFilterFactory.java index 401f2caf03f..94fc52165dd 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/ElisionTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ElisionTokenFilterFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.TokenStream; @@ -25,12 +25,15 @@ import org.apache.lucene.analysis.util.ElisionFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.index.analysis.Analysis; +import org.elasticsearch.index.analysis.MultiTermAwareComponent; public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { private final CharArraySet articles; - public ElisionTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { + ElisionTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); this.articles = Analysis.parseArticles(env, indexSettings.getIndexVersionCreated(), settings); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/FlattenGraphTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FlattenGraphTokenFilterFactory.java similarity index 84% rename from core/src/main/java/org/elasticsearch/index/analysis/FlattenGraphTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FlattenGraphTokenFilterFactory.java index 6c9487a2cb3..e59c23e4a6c 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/FlattenGraphTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FlattenGraphTokenFilterFactory.java @@ -17,17 +17,18 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.FlattenGraphFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; public class FlattenGraphTokenFilterFactory extends AbstractTokenFilterFactory { - public FlattenGraphTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + FlattenGraphTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java similarity index 88% rename from core/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java index 152d4395ef3..b24eb2c4fbc 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis.compound; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter; @@ -27,6 +27,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.xml.sax.InputSource; +import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; @@ -39,7 +40,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundW private final HyphenationTree hyphenationTree; - public HyphenationCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { + HyphenationCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, env, name, settings); String hyphenationPatternsPath = settings.get("hyphenation_patterns_path", null); @@ -50,7 +51,8 @@ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundW Path hyphenationPatternsFile = env.configFile().resolve(hyphenationPatternsPath); try { - hyphenationTree = HyphenationCompoundWordTokenFilter.getHyphenationTree(new InputSource(Files.newInputStream(hyphenationPatternsFile))); + InputStream in = Files.newInputStream(hyphenationPatternsFile); + hyphenationTree = HyphenationCompoundWordTokenFilter.getHyphenationTree(new InputSource(in)); } catch (Exception e) { throw new IllegalArgumentException("Exception while reading hyphenation_patterns_path.", e); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/KStemTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KStemTokenFilterFactory.java similarity index 84% rename from core/src/main/java/org/elasticsearch/index/analysis/KStemTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KStemTokenFilterFactory.java index 24f92ece101..2100e02fb61 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/KStemTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/KStemTokenFilterFactory.java @@ -17,17 +17,18 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.en.KStemFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; public class KStemTokenFilterFactory extends AbstractTokenFilterFactory { - public KStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + KStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LengthTokenFilterFactory.java similarity index 88% rename from core/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LengthTokenFilterFactory.java index 8a03802a7dd..477886d702b 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LengthTokenFilterFactory.java @@ -17,23 +17,24 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.LengthFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; public class LengthTokenFilterFactory extends AbstractTokenFilterFactory { private final int min; private final int max; - + // ancient unsupported option private static final String ENABLE_POS_INC_KEY = "enable_position_increments"; - public LengthTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + LengthTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); min = settings.getAsInt("min", 0); max = settings.getAsInt("max", Integer.MAX_VALUE); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenFilterFactory.java similarity index 89% rename from core/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenFilterFactory.java index 1d9ca2272b8..f85db0dae68 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenFilterFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.TokenStream; @@ -27,6 +27,8 @@ import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.index.analysis.MultiTermAwareComponent; /** * Factory for {@link LowerCaseFilter} and some language-specific variants @@ -41,7 +43,7 @@ public class LowerCaseTokenFilterFactory extends AbstractTokenFilterFactory impl private final String lang; - public LowerCaseTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + LowerCaseTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.lang = settings.get("language", null); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java similarity index 87% rename from core/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java index 7926f585bc3..2d7a8c52fd6 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java @@ -17,13 +17,14 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ngram.NGramTokenFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { @@ -33,7 +34,7 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { private final int maxGram; - public NGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + NGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE); this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); @@ -43,4 +44,4 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { public TokenStream create(TokenStream tokenStream) { return new NGramTokenFilter(tokenStream, minGram, maxGram); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/analysis/ReverseTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ReverseTokenFilterFactory.java similarity index 85% rename from core/src/main/java/org/elasticsearch/index/analysis/ReverseTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ReverseTokenFilterFactory.java index 1719841098d..125e1e496b9 100644 --- 
a/core/src/main/java/org/elasticsearch/index/analysis/ReverseTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ReverseTokenFilterFactory.java @@ -17,17 +17,18 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.reverse.ReverseStringFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; public class ReverseTokenFilterFactory extends AbstractTokenFilterFactory { - public ReverseTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + ReverseTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StemmerOverrideTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerOverrideTokenFilterFactory.java similarity index 90% rename from core/src/main/java/org/elasticsearch/index/analysis/StemmerOverrideTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerOverrideTokenFilterFactory.java index 66643cc2396..f95b4ed76e7 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StemmerOverrideTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerOverrideTokenFilterFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter; @@ -26,6 +26,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.index.analysis.Analysis; import java.io.IOException; import java.util.List; @@ -34,7 +36,7 @@ public class StemmerOverrideTokenFilterFactory extends AbstractTokenFilterFactor private final StemmerOverrideMap overrideMap; - public StemmerOverrideTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) throws IOException { + StemmerOverrideTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) throws IOException { super(indexSettings, name, settings); List rules = Analysis.getWordList(env, settings, "rules"); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java similarity index 98% rename from core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java index bf83876259b..c94a449afd2 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ar.ArabicStemFilter; @@ -57,6 +57,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; import org.tartarus.snowball.ext.ArmenianStemmer; import org.tartarus.snowball.ext.BasqueStemmer; import org.tartarus.snowball.ext.CatalanStemmer; @@ -86,7 +87,7 @@ public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory { private String language; - public StemmerTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + StemmerTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.language = Strings.capitalize(settings.get("language", settings.get("name", "porter"))); } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TruncateTokenFilterFactory.java similarity index 86% rename from core/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TruncateTokenFilterFactory.java index 49ea7d6940d..82311964664 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TruncateTokenFilterFactory.java @@ -17,19 +17,20 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; public class TruncateTokenFilterFactory extends AbstractTokenFilterFactory { private final int length; - public TruncateTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + TruncateTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.length = settings.getAsInt("length", -1); if (length <= 0) { diff --git a/core/src/main/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilter.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilter.java similarity index 92% rename from core/src/main/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilter.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilter.java index cc853932efc..ae2b03f5329 100644 --- a/core/src/main/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilter.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilter.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.apache.lucene.analysis.miscellaneous; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.TokenFilter; @@ -31,7 +31,7 @@ import java.io.IOException; * A token filter that generates unique tokens. Can remove unique tokens only on the same * position increments as well. */ -public class UniqueTokenFilter extends TokenFilter { +class UniqueTokenFilter extends TokenFilter { private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class); private final PositionIncrementAttribute posIncAttribute = addAttribute(PositionIncrementAttribute.class); @@ -39,11 +39,11 @@ public class UniqueTokenFilter extends TokenFilter { private final CharArraySet previous = new CharArraySet(8, false); private final boolean onlyOnSamePosition; - public UniqueTokenFilter(TokenStream in) { + UniqueTokenFilter(TokenStream in) { this(in, false); } - public UniqueTokenFilter(TokenStream in, boolean onlyOnSamePosition) { + UniqueTokenFilter(TokenStream in, boolean onlyOnSamePosition) { super(in); this.onlyOnSamePosition = onlyOnSamePosition; } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/UniqueTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilterFactory.java similarity index 86% rename from core/src/main/java/org/elasticsearch/index/analysis/UniqueTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilterFactory.java index 8606a60292c..256e3dad5c0 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/UniqueTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilterFactory.java @@ -17,19 +17,19 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.miscellaneous.UniqueTokenFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; public class UniqueTokenFilterFactory extends AbstractTokenFilterFactory { private final boolean onlyOnSamePosition; - public UniqueTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + UniqueTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.onlyOnSamePosition = settings.getAsBooleanLenientForPreEs6Indices( indexSettings.getIndexVersionCreated(), "only_on_same_position", false, deprecationLogger); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/UpperCaseTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UpperCaseTokenFilterFactory.java similarity index 89% rename from core/src/main/java/org/elasticsearch/index/analysis/UpperCaseTokenFilterFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UpperCaseTokenFilterFactory.java index 551345fc2e1..7923026d3da 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/UpperCaseTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UpperCaseTokenFilterFactory.java @@ -17,13 +17,15 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.UpperCaseFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.index.analysis.MultiTermAwareComponent; public class UpperCaseTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java index f7313572e13..37bf407df03 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java @@ -26,6 +26,7 @@ import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory; import org.apache.lucene.analysis.reverse.ReverseStringFilterFactory; import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory; import org.elasticsearch.index.analysis.HtmlStripCharFilterFactory; +import org.elasticsearch.index.analysis.SynonymTokenFilterFactory; import org.elasticsearch.indices.analysis.AnalysisFactoryTestCase; import java.util.List; @@ -51,13 +52,55 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase { @Override protected Map> getTokenFilters() { Map> filters = new TreeMap<>(super.getTokenFilters()); - filters.put("asciifolding", ASCIIFoldingTokenFilterFactory.class); - filters.put("keywordmarker", KeywordMarkerTokenFilterFactory.class); - filters.put("porterstem", PorterStemTokenFilterFactory.class); - filters.put("snowballporter", SnowballTokenFilterFactory.class); - filters.put("trim", TrimTokenFilterFactory.class); - filters.put("worddelimiter", WordDelimiterTokenFilterFactory.class); - filters.put("worddelimitergraph", WordDelimiterGraphTokenFilterFactory.class); + filters.put("asciifolding", ASCIIFoldingTokenFilterFactory.class); + filters.put("keywordmarker", KeywordMarkerTokenFilterFactory.class); + filters.put("porterstem", PorterStemTokenFilterFactory.class); + filters.put("snowballporter", SnowballTokenFilterFactory.class); + filters.put("trim", TrimTokenFilterFactory.class); + filters.put("worddelimiter", WordDelimiterTokenFilterFactory.class); + filters.put("worddelimitergraph", WordDelimiterGraphTokenFilterFactory.class); + filters.put("flattengraph", FlattenGraphTokenFilterFactory.class); + filters.put("length", LengthTokenFilterFactory.class); + filters.put("greeklowercase", LowerCaseTokenFilterFactory.class); + filters.put("irishlowercase", LowerCaseTokenFilterFactory.class); + filters.put("lowercase", LowerCaseTokenFilterFactory.class); + filters.put("turkishlowercase", LowerCaseTokenFilterFactory.class); + filters.put("uppercase", UpperCaseTokenFilterFactory.class); + filters.put("ngram", NGramTokenFilterFactory.class); + filters.put("edgengram", EdgeNGramTokenFilterFactory.class); + filters.put("bulgarianstem", StemmerTokenFilterFactory.class); + filters.put("englishminimalstem", StemmerTokenFilterFactory.class); + filters.put("englishpossessive", StemmerTokenFilterFactory.class); + filters.put("finnishlightstem", StemmerTokenFilterFactory.class); + filters.put("frenchlightstem", 
StemmerTokenFilterFactory.class); + filters.put("frenchminimalstem", StemmerTokenFilterFactory.class); + filters.put("galicianminimalstem", StemmerTokenFilterFactory.class); + filters.put("galicianstem", StemmerTokenFilterFactory.class); + filters.put("germanlightstem", StemmerTokenFilterFactory.class); + filters.put("germanminimalstem", StemmerTokenFilterFactory.class); + filters.put("greekstem", StemmerTokenFilterFactory.class); + filters.put("hindistem", StemmerTokenFilterFactory.class); + filters.put("hungarianlightstem", StemmerTokenFilterFactory.class); + filters.put("indonesianstem", StemmerTokenFilterFactory.class); + filters.put("italianlightstem", StemmerTokenFilterFactory.class); + filters.put("latvianstem", StemmerTokenFilterFactory.class); + filters.put("norwegianlightstem", StemmerTokenFilterFactory.class); + filters.put("norwegianminimalstem", StemmerTokenFilterFactory.class); + filters.put("portuguesestem", StemmerTokenFilterFactory.class); + filters.put("portugueselightstem", StemmerTokenFilterFactory.class); + filters.put("portugueseminimalstem", StemmerTokenFilterFactory.class); + filters.put("russianlightstem", StemmerTokenFilterFactory.class); + filters.put("soranistem", StemmerTokenFilterFactory.class); + filters.put("spanishlightstem", StemmerTokenFilterFactory.class); + filters.put("swedishlightstem", StemmerTokenFilterFactory.class); + filters.put("stemmeroverride", StemmerOverrideTokenFilterFactory.class); + filters.put("kstem", KStemTokenFilterFactory.class); + filters.put("synonym", SynonymTokenFilterFactory.class); + filters.put("dictionarycompoundword", DictionaryCompoundWordTokenFilterFactory.class); + filters.put("hyphenationcompoundword", HyphenationCompoundWordTokenFilterFactory.class); + filters.put("reversestring", ReverseTokenFilterFactory.class); + filters.put("elision", ElisionTokenFilterFactory.class); + filters.put("truncate", TruncateTokenFilterFactory.class); return filters; } @@ -136,6 +179,7 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase { @Override protected Map> getPreConfiguredTokenizers() { Map> filters = new TreeMap<>(super.getPreConfiguredTokenizers()); + filters.put("keyword", null); filters.put("lowercase", null); return filters; } diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java similarity index 84% rename from core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java index e8734331167..13b512f86e0 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; @@ -29,8 +29,9 @@ import org.elasticsearch.common.lucene.all.AllTokenStream; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory; -import org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory; +import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.MyFilterTokenFilterFactory; +import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider; import org.elasticsearch.plugins.AnalysisPlugin; @@ -40,10 +41,10 @@ import org.hamcrest.MatcherAssert; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; -import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItems; @@ -53,12 +54,7 @@ public class CompoundAnalysisTests extends ESTestCase { public void testDefaultsCompoundAnalysis() throws Exception { Settings settings = getJsonSettings(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); - AnalysisModule analysisModule = new AnalysisModule(new Environment(settings), singletonList(new AnalysisPlugin() { - @Override - public Map> getTokenFilters() { - return singletonMap("myfilter", MyFilterTokenFilterFactory::new); - } - })); + AnalysisModule analysisModule = createAnalysisModule(settings); TokenFilterFactory filterFactory = analysisModule.getAnalysisRegistry().buildTokenFilterFactories(idxSettings).get("dict_dec"); MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class)); } @@ -75,12 +71,7 @@ public class CompoundAnalysisTests extends ESTestCase { private List analyze(Settings settings, String analyzerName, String text) throws IOException { IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); - AnalysisModule analysisModule = new AnalysisModule(new Environment(settings), singletonList(new AnalysisPlugin() { - @Override - public Map> getTokenFilters() { - return singletonMap("myfilter", MyFilterTokenFilterFactory::new); - } - })); + AnalysisModule analysisModule = createAnalysisModule(settings); IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(idxSettings); Analyzer analyzer = indexAnalyzers.get(analyzerName).analyzer(); @@ -99,8 +90,18 @@ public class CompoundAnalysisTests extends ESTestCase { return terms; } + private AnalysisModule createAnalysisModule(Settings settings) throws IOException { + CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin(); + return new AnalysisModule(new Environment(settings), Arrays.asList(commonAnalysisPlugin, new AnalysisPlugin() { + @Override + public Map> getTokenFilters() { + return singletonMap("myfilter", MyFilterTokenFilterFactory::new); + } + })); + } + private Settings getJsonSettings() throws IOException { - String json = "/org/elasticsearch/index/analysis/test1.json"; + String json = "/org/elasticsearch/analysis/common/test1.json"; return Settings.builder() .loadFromStream(json, 
getClass().getResourceAsStream(json)) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) @@ -109,7 +110,7 @@ public class CompoundAnalysisTests extends ESTestCase { } private Settings getYamlSettings() throws IOException { - String yaml = "/org/elasticsearch/index/analysis/test1.yml"; + String yaml = "/org/elasticsearch/analysis/common/test1.yml"; return Settings.builder() .loadFromStream(yaml, getClass().getResourceAsStream(yaml)) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) diff --git a/core/src/test/java/org/elasticsearch/index/analysis/FlattenGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/FlattenGraphTokenFilterFactoryTests.java similarity index 98% rename from core/src/test/java/org/elasticsearch/index/analysis/FlattenGraphTokenFilterFactoryTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/FlattenGraphTokenFilterFactoryTests.java index 259da010daa..fec7f73a697 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/FlattenGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/FlattenGraphTokenFilterFactoryTests.java @@ -17,9 +17,7 @@ * under the License. */ -package org.elasticsearch.index.analysis; - -import java.io.IOException; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CannedTokenStream; import org.apache.lucene.analysis.Token; @@ -30,6 +28,8 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTokenStreamTestCase; import org.elasticsearch.test.IndexSettingsModule; +import java.io.IOException; + public class FlattenGraphTokenFilterFactoryTests extends ESTokenStreamTestCase { public void testBasic() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java similarity index 85% rename from core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java index 5e1cf2e8179..24efd89b7e0 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; @@ -30,6 +30,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory; +import org.elasticsearch.index.analysis.NGramTokenizerFactory; import org.elasticsearch.test.ESTokenStreamTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -52,7 +54,8 @@ public class NGramTokenizerFactoryTests extends ESTokenStreamTestCase { final Settings indexSettings = newAnalysisSettingsBuilder().build(); IndexSettings indexProperties = IndexSettingsModule.newIndexSettings(index, indexSettings); for (String tokenChars : Arrays.asList("letters", "number", "DIRECTIONALITY_UNDEFINED")) { - final Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", tokenChars).build(); + final Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3) + .put("token_chars", tokenChars).build(); try { new NGramTokenizerFactory(indexProperties, null, name, settings).create(); fail(); @@ -61,7 +64,8 @@ public class NGramTokenizerFactoryTests extends ESTokenStreamTestCase { } } for (String tokenChars : Arrays.asList("letter", " digit ", "punctuation", "DIGIT", "CoNtRoL", "dash_punctuation")) { - final Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", tokenChars).build(); + final Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3) + .put("token_chars", tokenChars).build(); indexProperties = IndexSettingsModule.newIndexSettings(index, indexSettings); new NGramTokenizerFactory(indexProperties, null, name, settings).create(); @@ -73,8 +77,10 @@ public class NGramTokenizerFactoryTests extends ESTokenStreamTestCase { final Index index = new Index("test", "_na_"); final String name = "ngr"; final Settings indexSettings = newAnalysisSettingsBuilder().build(); - final Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 4).putArray("token_chars", new String[0]).build(); - Tokenizer tokenizer = new NGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create(); + final Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 4) + .putArray("token_chars", new String[0]).build(); + Tokenizer tokenizer = new NGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings) + .create(); tokenizer.setReader(new StringReader("1.34")); assertTokenStreamContents(tokenizer, new String[] {"1.", "1.3", "1.34", ".3", ".34", "34"}); } @@ -84,12 +90,15 @@ public class NGramTokenizerFactoryTests extends ESTokenStreamTestCase { final Index index = new Index("test", "_na_"); final String name = "ngr"; final Settings indexSettings = newAnalysisSettingsBuilder().build(); - Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit").build(); - Tokenizer tokenizer = new NGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create(); + Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3) + .put("token_chars", 
"letter,digit").build(); + Tokenizer tokenizer = new NGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings) + .create(); tokenizer.setReader(new StringReader("Åbc déf g\uD801\uDC00f ")); assertTokenStreamContents(tokenizer, new String[] {"Åb", "Åbc", "bc", "dé", "déf", "éf", "g\uD801\uDC00", "g\uD801\uDC00f", "\uD801\uDC00f"}); - settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit,punctuation,whitespace,symbol").build(); + settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3) + .put("token_chars", "letter,digit,punctuation,whitespace,symbol").build(); tokenizer = new NGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create(); tokenizer.setReader(new StringReader(" a!$ 9")); assertTokenStreamContents(tokenizer, @@ -102,12 +111,15 @@ public class NGramTokenizerFactoryTests extends ESTokenStreamTestCase { final String name = "ngr"; final Settings indexSettings = newAnalysisSettingsBuilder().build(); Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit").build(); - Tokenizer tokenizer = new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create(); + Tokenizer tokenizer = + new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create(); tokenizer.setReader(new StringReader("Åbc déf g\uD801\uDC00f ")); assertTokenStreamContents(tokenizer, new String[] {"Åb", "Åbc", "dé", "déf", "g\uD801\uDC00", "g\uD801\uDC00f"}); - settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit,punctuation,whitespace,symbol").build(); - tokenizer = new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create(); + settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3) + .put("token_chars", "letter,digit,punctuation,whitespace,symbol").build(); + tokenizer = new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings) + .create(); tokenizer.setReader(new StringReader(" a!$ 9")); assertTokenStreamContents(tokenizer, new String[] {" a", " a!"}); @@ -128,7 +140,9 @@ public class NGramTokenizerFactoryTests extends ESTokenStreamTestCase { Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build(); Tokenizer tokenizer = new MockTokenizer(); tokenizer.setReader(new StringReader("foo bar")); - TokenStream edgeNGramTokenFilter = new EdgeNGramTokenFilterFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create(tokenizer); + TokenStream edgeNGramTokenFilter = + new EdgeNGramTokenFilterFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings) + .create(tokenizer); if (reverse) { assertThat(edgeNGramTokenFilter, instanceOf(ReverseStringFilter.class)); } else { diff --git a/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java similarity index 90% rename from core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java rename to 
modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java index c4632e57490..10f7653c52c 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -26,6 +26,10 @@ import org.apache.lucene.analysis.snowball.SnowballFilter; import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.analysis.AnalysisTestsHelper; +import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTokenStreamTestCase; import org.elasticsearch.test.VersionUtils; @@ -38,6 +42,9 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_C import static org.hamcrest.Matchers.instanceOf; public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase { + + private static final CommonAnalysisPlugin PLUGIN = new CommonAnalysisPlugin(); + public void testEnglishFilterFactory() throws IOException { int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { @@ -51,7 +58,7 @@ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings); + ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, PLUGIN); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_english"); assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class)); Tokenizer tokenizer = new WhitespaceTokenizer(); @@ -79,7 +86,7 @@ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings); + ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, PLUGIN); TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_porter2"); assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class)); Tokenizer tokenizer = new WhitespaceTokenizer(); diff --git a/core/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java similarity index 97% rename from core/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java index 324e422531b..f75822a13c4 100644 --- a/core/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.apache.lucene.analysis.miscellaneous; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockTokenizer; diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml index 7063437ad46..c0945e047c5 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml @@ -27,14 +27,14 @@ - match: { detail.tokenizer.tokens.2.token: od } --- -"simplepattern": +"simple_pattern": - do: indices.analyze: body: text: "a6bf fooo ff61" explain: true tokenizer: - type: simplepattern + type: simple_pattern pattern: "[abcdef0123456789]{4}" - length: { detail.tokenizer.tokens: 2 } - match: { detail.tokenizer.name: _anonymous_tokenizer } @@ -42,14 +42,14 @@ - match: { detail.tokenizer.tokens.1.token: ff61 } --- -"simplepatternsplit": +"simple_pattern_split": - do: indices.analyze: body: text: "foo==bar" explain: true tokenizer: - type: simplepatternsplit + type: simple_pattern_split pattern: == - length: { detail.tokenizer.tokens: 2 } - match: { detail.tokenizer.name: _anonymous_tokenizer } diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index eb9dec65542..2283634a80a 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -210,3 +210,361 @@ - match: { detail.tokenfilters.0.tokens.5.start_offset: 16 } - match: { detail.tokenfilters.0.tokens.5.end_offset: 19 } - match: { detail.tokenfilters.0.tokens.5.position: 5 } + +--- +"unique": + - do: + indices.analyze: + body: + text: Foo Foo Bar! + tokenizer: whitespace + filter: [unique] + - length: { tokens: 2 } + - match: { tokens.0.token: Foo } + - match: { tokens.1.token: Bar! 
} + +--- +"synonym_graph and flatten_graph": + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_synonym_graph: + type: synonym_graph + synonyms: ["automatic teller machine,atm,cash point"] + + - do: + indices.analyze: + index: test + body: + text: this automatic teller machine is down + tokenizer: whitespace + filter: [my_synonym_graph] + - length: { tokens: 9 } + - match: { tokens.0.token: this } + - match: { tokens.0.position: 0 } + - is_false: tokens.0.positionLength + - match: { tokens.1.token: atm } + - match: { tokens.1.position: 1 } + - match: { tokens.1.positionLength: 4 } + - match: { tokens.2.token: cash } + - match: { tokens.2.position: 1 } + - is_false: tokens.2.positionLength + - match: { tokens.3.token: automatic } + - match: { tokens.3.position: 1 } + - match: { tokens.3.positionLength: 2 } + - match: { tokens.4.token: point } + - match: { tokens.4.position: 2 } + - match: { tokens.4.positionLength: 3 } + - match: { tokens.5.token: teller } + - match: { tokens.5.position: 3 } + - is_false: tokens.5.positionLength + - match: { tokens.6.token: machine } + - match: { tokens.6.position: 4 } + - is_false: tokens.6.positionLength + - match: { tokens.7.token: is } + - match: { tokens.7.position: 5 } + - is_false: tokens.7.positionLength + - match: { tokens.8.token: down } + - match: { tokens.8.position: 6 } + - is_false: tokens.8.positionLength + + - do: + indices.analyze: + index: test + body: + text: this automatic teller machine is down + tokenizer: whitespace + filter: [my_synonym_graph,flatten_graph] + - length: { tokens: 9 } + - match: { tokens.0.token: this } + - match: { tokens.0.position: 0 } + - is_false: tokens.0.positionLength + - match: { tokens.1.token: atm } + - match: { tokens.1.position: 1 } + - match: { tokens.1.positionLength: 3 } + - match: { tokens.2.token: cash } + - match: { tokens.2.position: 1 } + - is_false: tokens.2.positionLength + - match: { tokens.3.token: automatic } + - match: { tokens.3.position: 1 } + - is_false: tokens.3.positionLength + - match: { tokens.4.token: point } + - match: { tokens.4.position: 2 } + - match: { tokens.4.positionLength: 2 } + - match: { tokens.5.token: teller } + - match: { tokens.5.position: 2 } + - is_false: tokens.5.positionLength + - match: { tokens.6.token: machine } + - match: { tokens.6.position: 3 } + - is_false: tokens.6.positionLength + - match: { tokens.7.token: is } + - match: { tokens.7.position: 4 } + - is_false: tokens.7.positionLength + - match: { tokens.8.token: down } + - match: { tokens.8.position: 5 } + - is_false: tokens.8.positionLength + +--- +"length": + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_length: + type: length + min: 6 + - do: + indices.analyze: + index: test + body: + text: foo bar foobar + tokenizer: whitespace + filter: [my_length] + - length: { tokens: 1 } + - match: { tokens.0.token: foobar } + +--- +"uppercase": + - do: + indices.analyze: + body: + text: foobar + tokenizer: keyword + filter: [uppercase] + - length: { tokens: 1 } + - match: { tokens.0.token: FOOBAR } + +--- +"ngram": + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_ngram: + type: ngram + min_gram: 3 + max_gram: 3 + - do: + indices.analyze: + index: test + body: + text: foobar + tokenizer: keyword + filter: [my_ngram] + - length: { tokens: 4 } + - match: { tokens.0.token: foo } + - match: { tokens.1.token: oob } + - match: { tokens.2.token: oba } + - match: { tokens.3.token: bar } + +--- +"edge_ngram": + - 
do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_edge_ngram: + type: edge_ngram + min_gram: 3 + max_gram: 6 + - do: + indices.analyze: + index: test + body: + text: foobar + tokenizer: keyword + filter: [my_edge_ngram] + - length: { tokens: 4 } + - match: { tokens.0.token: foo } + - match: { tokens.1.token: foob } + - match: { tokens.2.token: fooba } + - match: { tokens.3.token: foobar } + +--- +"kstem": + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_kstem: + type: kstem + - do: + indices.analyze: + index: test + body: + text: bricks + tokenizer: keyword + filter: [my_kstem] + - length: { tokens: 1 } + - match: { tokens.0.token: brick } + + # use preconfigured token filter: + - do: + indices.analyze: + body: + text: bricks + tokenizer: keyword + filter: [kstem] + - length: { tokens: 1 } + - match: { tokens.0.token: brick } + +--- +"reverse": + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_reverse: + type: reverse + - do: + indices.analyze: + index: test + body: + text: foobar + tokenizer: keyword + filter: [my_reverse] + - length: { tokens: 1 } + - match: { tokens.0.token: raboof } + + # use preconfigured token filter: + - do: + indices.analyze: + body: + text: foobar + tokenizer: keyword + filter: [reverse] + - length: { tokens: 1 } + - match: { tokens.0.token: raboof } + +--- +"elision": + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_elision: + type: elision + articles: ["l", "m", "t", "qu", "n", "s", "j"] + - do: + indices.analyze: + index: test + body: + text: "l'avion" + tokenizer: keyword + filter: [my_elision] + - length: { tokens: 1 } + - match: { tokens.0.token: avion } + +--- +"stemmer": + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_stemmer: + type: stemmer + language: dutch + - do: + indices.analyze: + index: test + body: + text: zoeken + tokenizer: keyword + filter: [my_stemmer] + - length: { tokens: 1 } + - match: { tokens.0.token: zoek } +--- +"stemmer_override": + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_stemmer: + type: stemmer + language: dutch + my_stemmer_override: + type: stemmer_override + rules: ["zoeken => override"] + - do: + indices.analyze: + index: test + body: + text: zoeken + tokenizer: keyword + filter: [my_stemmer_override, my_stemmer] + - length: { tokens: 1 } + - match: { tokens.0.token: override } + +--- +"decompounder": + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_decompounder: + type: dictionary_decompounder + word_list: [foo, bar] + - do: + indices.analyze: + index: test + body: + text: foobar + tokenizer: keyword + filter: [my_decompounder] + - length: { tokens: 3 } + - match: { tokens.0.token: foobar } + - match: { tokens.1.token: foo } + - match: { tokens.2.token: bar } + +--- +"truncate": + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_truncate: + type: truncate + length: 3 + - do: + indices.analyze: + index: test + body: + text: foobar + tokenizer: keyword + filter: [my_truncate] + - length: { tokens: 1 } + - match: { tokens.0.token: foo } diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml new file mode 100644 index 00000000000..75dff3c7096 --- /dev/null +++ 
b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml @@ -0,0 +1,35 @@ +"Synonym filter with char_filter": + # Tests analyze with synonym and char_filter. This is in the analysis-common module + # because there are no char filters in core. + - skip: + version: " - 5.99.99" + reason: to support synonym same analysis chain were added in 6.0.0 + - do: + indices.create: + index: test_synonym_with_charfilter + body: + settings: + index: + analysis: + analyzer: + synonymAnalyzerWithCharfilter: + tokenizer: whitespace + char_filter: ["html_strip"] + filter: ["synonym"] + filter: + synonym: + type: synonym + synonyms: ["
<p>kimchy</p>
=> shay", "dude => elasticsearch", "abides => man!"] + + - do: + indices.analyze: + index: test_synonym_with_charfilter + body: + analyzer: "synonymAnalyzerWithCharfilter" + text: "kimchy is the dude abides" + - length: { tokens: 5 } + - match: { tokens.0.token: shay } + - match: { tokens.1.token: is } + - match: { tokens.2.token: the } + - match: { tokens.3.token: elasticsearch } + - match: { tokens.4.token: man! } diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml new file mode 100644 index 00000000000..eb8c9789a63 --- /dev/null +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml @@ -0,0 +1,41 @@ +"ngram search": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + analysis: + analyzer: + my_analyzer: + tokenizer: standard + filter: [my_ngram] + filter: + my_ngram: + type: ngram + min: 2, + max: 2 + mappings: + doc: + properties: + text: + type: text + analyzer: my_analyzer + + - do: + index: + index: test + type: doc + id: 1 + body: { "text": "foo bar baz" } + refresh: true + + - do: + search: + body: + query: + match: + text: + query: foa + - match: {hits.total: 1} diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml new file mode 100644 index 00000000000..b04496965eb --- /dev/null +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml @@ -0,0 +1,129 @@ +"ngram highlighting": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + analysis: + tokenizer: + my_ngramt: + type: ngram + min_gram: 1 + max_gram: 20 + token_chars: letter,digit + filter: + my_ngram: + type: ngram + min_gram: 1 + max_gram: 20 + analyzer: + name2_index_analyzer: + tokenizer: whitespace + filter: [my_ngram] + name_index_analyzer: + tokenizer: my_ngramt + name_search_analyzer: + tokenizer: whitespace + mappings: + doc: + properties: + name: + type: text + term_vector: with_positions_offsets + analyzer: name_index_analyzer + search_analyzer: name_search_analyzer + name2: + type: text + term_vector: with_positions_offsets + analyzer: name2_index_analyzer + search_analyzer: name_search_analyzer + + - do: + index: + index: test + type: doc + id: 1 + refresh: true + body: + name: logicacmg ehemals avinci - the know how company + name2: logicacmg ehemals avinci - the know how company + + - do: + search: + body: + query: + match: + name: + query: logica m + highlight: + fields: + - name: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name.0: "logicacmg ehemals avinci - the know how company"} + + - do: + search: + body: + query: + match: + name: + query: logica ma + highlight: + fields: + - name: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name.0: "logicacmg ehemals avinci - the know how company"} + + - do: + search: + body: + query: + match: + name: + query: logica + highlight: + fields: + - name: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name.0: "logicacmg ehemals avinci - the know how company"} + + - do: + search: + body: + query: + match: + name2: + query: logica m + highlight: + fields: + - name2: {} + - match: {hits.total: 1} + - match: 
{hits.hits.0.highlight.name2.0: "logicacmg ehemals avinci - the know how company"} + + - do: + search: + body: + query: + match: + name2: + query: logica ma + highlight: + fields: + - name2: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name2.0: "logicacmg ehemals avinci - the know how company"} + + - do: + search: + body: + query: + match: + name2: + query: logica + highlight: + fields: + - name2: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name2.0: "logicacmg ehemals avinci - the know how company"} diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/20_phrase.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/20_phrase.yml index cf5ebcea42e..18c3c814625 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/20_phrase.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.suggest/20_phrase.yml @@ -19,6 +19,9 @@ setup: ngram: tokenizer: standard filter: [lowercase, ngram] + reverse: + tokenizer: standard + filter: [lowercase, reverse] filter: bigram: type: shingle @@ -43,6 +46,9 @@ setup: ngram: type: text analyzer: ngram + reverse: + type: text + analyzer: reverse - do: bulk: @@ -54,6 +60,40 @@ setup: { "body": "Xorr the God-Jewel" } { "index": {} } { "body": "Xorn" } + { "index": {} } + { "body": "Arthur, King of the Britons" } + { "index": {} } + { "body": "Sir Lancelot the Brave" } + { "index": {} } + { "body": "Patsy, Arthur's Servant" } + { "index": {} } + { "body": "Sir Robin the Not-Quite-So-Brave-as-Sir-Lancelot" } + { "index": {} } + { "body": "Sir Bedevere the Wise" } + { "index": {} } + { "body": "Sir Galahad the Pure" } + { "index": {} } + { "body": "Miss Islington, the Witch" } + { "index": {} } + { "body": "Zoot" } + { "index": {} } + { "body": "Leader of Robin's Minstrels" } + { "index": {} } + { "body": "Old Crone" } + { "index": {} } + { "body": "Frank, the Historian" } + { "index": {} } + { "body": "Frank's Wife" } + { "index": {} } + { "body": "Dr. Piglet" } + { "index": {} } + { "body": "Dr. 
Winston" } + { "index": {} } + { "body": "Sir Robin (Stand-in)" } + { "index": {} } + { "body": "Knight Who Says Ni" } + { "index": {} } + { "body": "Police sergeant who stops the film" } --- "sorts by score": @@ -156,3 +196,27 @@ setup: field: body.bigram analyzer: bigram force_unigrams: false + +--- +"reverse suggestions": + - do: + search: + size: 0 + index: test + body: + suggest: + text: Artur, Ging of the Britons + test: + phrase: + field: body.ngram + force_unigrams: true + max_errors: 0.5 + direct_generator: + - field: body.reverse + min_word_length: 1 + suggest_mode: always + pre_filter: reverse + post_filter: reverse + + - match: {suggest.test.0.options.0.text: arthur king of the britons} + diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SortProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SortProcessor.java index 37a2c16e24f..28e568233eb 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SortProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SortProcessor.java @@ -24,6 +24,7 @@ import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; @@ -99,17 +100,15 @@ public final class SortProcessor extends AbstractProcessor { throw new IllegalArgumentException("field [" + field + "] is null, cannot sort."); } - if (list.size() <= 1) { - return; - } + List copy = new ArrayList<>(list); if (order.equals(SortOrder.ASCENDING)) { - Collections.sort(list); + Collections.sort(copy); } else { - Collections.sort(list, Collections.reverseOrder()); + Collections.sort(copy, Collections.reverseOrder()); } - document.setFieldValue(targetField, list); + document.setFieldValue(targetField, copy); } @Override diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java index d48c795c5b2..9d37f27bb33 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java @@ -103,10 +103,10 @@ public abstract class AbstractStringProcessorTestCase extends ESTestCase { } public void testTargetField() throws Exception { - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); String fieldValue = RandomDocumentPicks.randomString(random()); String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, modifyInput(fieldValue)); - String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + String targetFieldName = fieldName + "foo"; Processor processor = newProcessor(fieldName, randomBoolean(), targetFieldName); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue(targetFieldName, String.class), equalTo(expectedResult(fieldValue))); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SortProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SortProcessorTests.java index 45f87241212..5eca68f35de 100644 --- 
a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SortProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SortProcessorTests.java @@ -275,8 +275,8 @@ public class SortProcessorTests extends ESTestCase { } } - public void testSortWithTargetField() throws Exception { - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + public void testDescendingSortWithTargetField() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); int numItems = randomIntBetween(1, 10); List fieldValue = new ArrayList<>(numItems); List expectedResult = new ArrayList<>(numItems); @@ -285,6 +285,42 @@ public class SortProcessorTests extends ESTestCase { fieldValue.add(value); expectedResult.add(value); } + + Collections.sort(expectedResult, Collections.reverseOrder()); + + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue); + String targetFieldName = fieldName + "foo"; + Processor processor = new SortProcessor(randomAlphaOfLength(10), fieldName, + SortOrder.DESCENDING, targetFieldName); + processor.execute(ingestDocument); + assertEquals(ingestDocument.getFieldValue(targetFieldName, List.class), expectedResult); + } + + public void testAscendingSortWithTargetField() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); + int numItems = randomIntBetween(1, 10); + List fieldValue = new ArrayList<>(numItems); + List expectedResult = new ArrayList<>(numItems); + for (int j = 0; j < numItems; j++) { + String value = randomAlphaOfLengthBetween(1, 10); + fieldValue.add(value); + expectedResult.add(value); + } + + Collections.sort(expectedResult); + + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue); + String targetFieldName = fieldName + "foo"; + Processor processor = new SortProcessor(randomAlphaOfLength(10), fieldName, + SortOrder.ASCENDING, targetFieldName); + processor.execute(ingestDocument); + assertEquals(ingestDocument.getFieldValue(targetFieldName, List.class), expectedResult); + } + + public void testSortWithTargetFieldLeavesOriginalUntouched() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); + List fieldValue = Arrays.asList(1, 5, 4); + List expectedResult = new ArrayList<>(fieldValue); Collections.sort(expectedResult); SortOrder order = randomBoolean() ? 
SortOrder.ASCENDING : SortOrder.DESCENDING; @@ -292,11 +328,11 @@ public class SortProcessorTests extends ESTestCase { Collections.reverse(expectedResult); } - String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue); - String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, new ArrayList<>(fieldValue)); + String targetFieldName = fieldName + "foo"; Processor processor = new SortProcessor(randomAlphaOfLength(10), fieldName, order, targetFieldName); processor.execute(ingestDocument); assertEquals(ingestDocument.getFieldValue(targetFieldName, List.class), expectedResult); + assertEquals(ingestDocument.getFieldValue(fieldName, List.class), fieldValue); } - } diff --git a/modules/lang-expression/licenses/lucene-expressions-7.0.0-snapshot-a0aef2f.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 361094b626c..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7bfe234a793f8a1f0556def4e526d040ed636c8 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.0.0-snapshot-ad2cb77.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..19cb0afb1ce --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +6286fec3656b8e8153d33488094d92a134f77e3d \ No newline at end of file diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java index b40a13ef9f0..cb19a604623 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionSearchScript.java @@ -54,7 +54,7 @@ class ExpressionSearchScript implements SearchScript.LeafFactory { } @Override - public boolean needsScores() { + public boolean needs_score() { return needsScores; } diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java index 72c54959870..81f76de6c36 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java @@ -47,10 +47,10 @@ public class ExpressionTests extends ESSingleNodeTestCase { } public void testNeedsScores() { - assertFalse(compile("1.2").needsScores()); - assertFalse(compile("doc['d'].value").needsScores()); - assertTrue(compile("1/_score").needsScores()); - assertTrue(compile("doc['d'].value * _score").needsScores()); + assertFalse(compile("1.2").needs_score()); + assertFalse(compile("doc['d'].value").needs_score()); + assertTrue(compile("1/_score").needs_score()); + assertTrue(compile("doc['d'].value * _score").needs_score()); } public void testCompileError() { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java index e2214c6e992..582ba6f4d5b 100644 --- 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java @@ -33,6 +33,7 @@ import java.security.cert.Certificate; import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.painless.WriterConstants.CLASS_NAME; +import static org.elasticsearch.painless.node.SSource.MainMethodReserved; /** * The Compiler is the entry point for generating a Painless script. The compiler will receive a Painless @@ -143,7 +144,7 @@ final class Compiler { * @param settings The CompilerSettings to be used during the compilation. * @return An executable script that implements both a specified interface and is a subclass of {@link PainlessScript} */ - Constructor compile(Loader loader, String name, String source, CompilerSettings settings) { + Constructor compile(Loader loader, MainMethodReserved reserved, String name, String source, CompilerSettings settings) { if (source.length() > MAXIMUM_SOURCE_LENGTH) { throw new IllegalArgumentException("Scripts may be no longer than " + MAXIMUM_SOURCE_LENGTH + " characters. The passed in script is " + source.length() + " characters. Consider using a" + @@ -151,7 +152,7 @@ final class Compiler { } ScriptClassInfo scriptClassInfo = new ScriptClassInfo(definition, base); - SSource root = Walker.buildPainlessTree(scriptClassInfo, name, source, settings, definition, + SSource root = Walker.buildPainlessTree(scriptClassInfo, reserved, name, source, settings, definition, null); root.analyze(definition); root.write(); @@ -183,7 +184,7 @@ final class Compiler { } ScriptClassInfo scriptClassInfo = new ScriptClassInfo(definition, base); - SSource root = Walker.buildPainlessTree(scriptClassInfo, name, source, settings, definition, + SSource root = Walker.buildPainlessTree(scriptClassInfo, new MainMethodReserved(), name, source, settings, definition, debugStream); root.analyze(definition); root.write(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index f8bee4e5cfc..f0897e70935 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -199,14 +199,14 @@ public final class Definition { public static class Method { public final String name; public final Struct owner; - public final boolean augmentation; + public final Class augmentation; public final Type rtn; public final List arguments; public final org.objectweb.asm.commons.Method method; public final int modifiers; public final MethodHandle handle; - public Method(String name, Struct owner, boolean augmentation, Type rtn, List arguments, + public Method(String name, Struct owner, Class augmentation, Type rtn, List arguments, org.objectweb.asm.commons.Method method, int modifiers, MethodHandle handle) { this.name = name; this.augmentation = augmentation; @@ -232,10 +232,10 @@ public final class Definition { // otherwise compute it final Class params[]; final Class returnValue; - if (augmentation) { + if (augmentation != null) { // static method disguised as virtual/interface method params = new Class[1 + arguments.size()]; - params[0] = Augmentation.class; + params[0] = augmentation; for (int i = 0; i < arguments.size(); i++) { params[i + 1] = arguments.get(i).clazz; } @@ -268,9 +268,9 @@ public final class Definition { public void write(MethodWriter writer) { final 
org.objectweb.asm.Type type; - if (augmentation) { + if (augmentation != null) { assert java.lang.reflect.Modifier.isStatic(modifiers); - type = WriterConstants.AUGMENTATION_TYPE; + type = org.objectweb.asm.Type.getType(augmentation); } else { type = owner.type; } @@ -731,7 +731,7 @@ public final class Definition { " with arguments " + Arrays.toString(classes) + "."); } - final Method constructor = new Method(name, owner, false, returnType, Arrays.asList(args), asm, reflect.getModifiers(), handle); + final Method constructor = new Method(name, owner, null, returnType, Arrays.asList(args), asm, reflect.getModifiers(), handle); owner.constructors.put(methodKey, constructor); } @@ -775,10 +775,14 @@ public final class Definition { } addConstructorInternal(className, "", args); } else { - if (methodName.indexOf("*") >= 0) { - addMethodInternal(className, methodName.substring(0, methodName.length() - 1), true, rtn, args); + int index = methodName.lastIndexOf("."); + + if (index >= 0) { + String augmentation = methodName.substring(0, index); + methodName = methodName.substring(index + 1); + addMethodInternal(className, methodName, augmentation, rtn, args); } else { - addMethodInternal(className, methodName, false, rtn, args); + addMethodInternal(className, methodName, null, rtn, args); } } } else { @@ -787,8 +791,7 @@ public final class Definition { } } - private void addMethodInternal(String struct, String name, boolean augmentation, - Type rtn, Type[] args) { + private void addMethodInternal(String struct, String name, String augmentation, Type rtn, Type[] args) { final Struct owner = structsMap.get(struct); if (owner == null) { @@ -817,14 +820,20 @@ public final class Definition { final Class implClass; final Class[] params; - if (augmentation == false) { + if (augmentation == null) { implClass = owner.clazz; params = new Class[args.length]; for (int count = 0; count < args.length; ++count) { params[count] = args[count].clazz; } } else { - implClass = Augmentation.class; + try { + implClass = Class.forName(augmentation); + } catch (ClassNotFoundException cnfe) { + throw new IllegalArgumentException("Augmentation class [" + augmentation + "]" + + " not found for struct [" + struct + "] using method name [" + name + "].", cnfe); + } + params = new Class[args.length + 1]; params[0] = owner.clazz; for (int count = 0; count < args.length; ++count) { @@ -862,9 +871,10 @@ public final class Definition { } final int modifiers = reflect.getModifiers(); - final Method method = new Method(name, owner, augmentation, rtn, Arrays.asList(args), asm, modifiers, handle); + final Method method = + new Method(name, owner, augmentation == null ? null : implClass, rtn, Arrays.asList(args), asm, modifiers, handle); - if (augmentation == false && java.lang.reflect.Modifier.isStatic(modifiers)) { + if (augmentation == null && java.lang.reflect.Modifier.isStatic(modifiers)) { owner.staticMethods.put(methodKey, method); } else { owner.methods.put(methodKey, method); @@ -966,8 +976,8 @@ public final class Definition { // TODO: we *have* to remove all these public members and use getter methods to encapsulate! 
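        // NOTE: a Painless "augmentation" is a public static helper method whose first parameter is the
        // receiver type; scripts invoke it as if it were a virtual method on that receiver, which is why
        // arguments[0] below is the owner class. With augmentation now carrying the helper's Class instead
        // of a boolean, the helper may live on any class named in the whitelist entry rather than only on
        // Augmentation. For example, FeatureTestAugmentation in this patch declares
        //     public static int getTotal(FeatureTest ft) { return ft.getX() + ft.getY(); }
        // and is whitelisted in org.elasticsearch.txt as
        //     int org.elasticsearch.painless.FeatureTestAugmentation.getTotal()
        // so a script can simply call ft.getTotal().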
final Class impl; final Class arguments[]; - if (method.augmentation) { - impl = Augmentation.class; + if (method.augmentation != null) { + impl = method.augmentation; arguments = new Class[method.arguments.size() + 1]; arguments[0] = method.owner.clazz; for (int i = 0; i < method.arguments.size(); i++) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTestAugmentation.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTestAugmentation.java new file mode 100644 index 00000000000..c1ea19defb9 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FeatureTestAugmentation.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +public class FeatureTestAugmentation { + public static int getTotal(FeatureTest ft) { + return ft.getX() + ft.getY(); + } + + public static int addToTotal(FeatureTest ft, int add) { + return getTotal(ft) + add; + } + + private FeatureTestAugmentation() {} +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java index 6bfe911d974..eb2bb1f554e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java @@ -97,8 +97,8 @@ public class FunctionRef { // the Painless$Script class can be inferred if owner is null if (delegateMethod.owner == null) { delegateClassName = CLASS_NAME; - } else if (delegateMethod.augmentation) { - delegateClassName = Augmentation.class.getName(); + } else if (delegateMethod.augmentation != null) { + delegateClassName = delegateMethod.augmentation.getName(); } else { delegateClassName = delegateMethod.owner.clazz.getName(); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index f070cb39a45..39f5c48b65e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -19,6 +19,7 @@ package org.elasticsearch.painless; +import org.apache.logging.log4j.core.tools.Generate; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.component.AbstractComponent; @@ -43,6 +44,7 @@ import java.security.Permissions; import java.security.PrivilegedAction; import java.security.ProtectionDomain; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import 
java.util.Collections; import java.util.HashMap; @@ -50,6 +52,7 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.painless.WriterConstants.OBJECT_TYPE; +import static org.elasticsearch.painless.node.SSource.MainMethodReserved; /** * Implementation of a ScriptEngine for the Painless language. @@ -133,7 +136,7 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr return new ScriptImpl(painlessScript, p, lookup, context); } @Override - public boolean needsScores() { + public boolean needs_score() { return painlessScript.needs_score(); } }; @@ -156,22 +159,130 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr } }); - compile(contextsToCompilers.get(context), loader, scriptName, scriptSource, params); + MainMethodReserved reserved = new MainMethodReserved(); + compile(contextsToCompilers.get(context), loader, reserved, scriptName, scriptSource, params); - return generateFactory(loader, context); + if (context.statefulFactoryClazz != null) { + return generateFactory(loader, context, reserved, generateStatefulFactory(loader, context, reserved)); + } else { + return generateFactory(loader, context, reserved, WriterConstants.CLASS_TYPE); + } } } /** - * Generates a factory class that will return script instances. - * Uses the newInstance method from a {@link ScriptContext#factoryClazz} to define the factory method - * to create new instances of the {@link ScriptContext#instanceClazz}. + * Generates a stateful factory class that will return script instances. Acts as a middle man between + * the {@link ScriptContext#factoryClazz} and the {@link ScriptContext#instanceClazz} when used so that + * the stateless factory can be used for caching and the stateful factory can act as a cache for new + * script instances. Uses the newInstance method from a {@link ScriptContext#statefulFactoryClazz} to + * define the factory method to create new instances of the {@link ScriptContext#instanceClazz}. * @param loader The {@link ClassLoader} that is used to define the factory class and script class. * @param context The {@link ScriptContext}'s semantics are used to define the factory class. * @param The factory class. * @return A factory class that will return script instances. 
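     * <p>
     * Illustrative sketch (hypothetical names, not emitted verbatim): for a context whose factory declares
     * {@code newFactory(Map<String, Object> params)} and whose stateful factory declares
     * {@code newInstance(double threshold)}, the class written here is roughly equivalent to
     * <pre>{@code
     * public final class ExampleScript$StatefulFactory implements ExampleScript.StatefulFactory {
     *     private final Map<String, Object> $arg0;                      // captured newFactory argument
     *     public ExampleScript$StatefulFactory(Map<String, Object> $arg0) { this.$arg0 = $arg0; }
     *     public ExampleScript newInstance(double threshold) {          // per-call arguments are appended
     *         return new PainlessCompiledScript($arg0, threshold);      // the compiled script class
     *     }
     *     // plus one boolean needsXyz() method per needs* getter on the stateful factory interface
     * }
     * }</pre>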
*/ - private T generateFactory(Loader loader, ScriptContext context) { + private Type generateStatefulFactory(Loader loader, ScriptContext context, MainMethodReserved reserved) { + int classFrames = ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS; + int classAccess = Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER | Opcodes.ACC_FINAL; + String interfaceBase = Type.getType(context.statefulFactoryClazz).getInternalName(); + String className = interfaceBase + "$StatefulFactory"; + String classInterfaces[] = new String[] { interfaceBase }; + + ClassWriter writer = new ClassWriter(classFrames); + writer.visit(WriterConstants.CLASS_VERSION, classAccess, className, null, OBJECT_TYPE.getInternalName(), classInterfaces); + + Method newFactory = null; + + for (Method method : context.factoryClazz.getMethods()) { + if ("newFactory".equals(method.getName())) { + newFactory = method; + + break; + } + } + + for (int count = 0; count < newFactory.getParameterTypes().length; ++count) { + writer.visitField(Opcodes.ACC_PRIVATE | Opcodes.ACC_FINAL, "$arg" + count, + Type.getType(newFactory.getParameterTypes()[count]).getDescriptor(), null, null).visitEnd(); + } + + org.objectweb.asm.commons.Method base = + new org.objectweb.asm.commons.Method("", MethodType.methodType(void.class).toMethodDescriptorString()); + org.objectweb.asm.commons.Method init = new org.objectweb.asm.commons.Method("", + MethodType.methodType(void.class, newFactory.getParameterTypes()).toMethodDescriptorString()); + + GeneratorAdapter constructor = new GeneratorAdapter(Opcodes.ASM5, init, + writer.visitMethod(Opcodes.ACC_PUBLIC, init.getName(), init.getDescriptor(), null, null)); + constructor.visitCode(); + constructor.loadThis(); + constructor.invokeConstructor(OBJECT_TYPE, base); + + for (int count = 0; count < newFactory.getParameterTypes().length; ++count) { + constructor.loadThis(); + constructor.loadArg(count); + constructor.putField(Type.getType(className), "$arg" + count, Type.getType(newFactory.getParameterTypes()[count])); + } + + constructor.returnValue(); + constructor.endMethod(); + + Method newInstance = null; + + for (Method method : context.statefulFactoryClazz.getMethods()) { + if ("newInstance".equals(method.getName())) { + newInstance = method; + + break; + } + } + + org.objectweb.asm.commons.Method instance = new org.objectweb.asm.commons.Method(newInstance.getName(), + MethodType.methodType(newInstance.getReturnType(), newInstance.getParameterTypes()).toMethodDescriptorString()); + + List> parameters = new ArrayList<>(Arrays.asList(newFactory.getParameterTypes())); + parameters.addAll(Arrays.asList(newInstance.getParameterTypes())); + + org.objectweb.asm.commons.Method constru = new org.objectweb.asm.commons.Method("", + MethodType.methodType(void.class, parameters.toArray(new Class[] {})).toMethodDescriptorString()); + + GeneratorAdapter adapter = new GeneratorAdapter(Opcodes.ASM5, instance, + writer.visitMethod(Opcodes.ACC_PUBLIC | Opcodes.ACC_FINAL, + instance.getName(), instance.getDescriptor(), null, null)); + adapter.visitCode(); + adapter.newInstance(WriterConstants.CLASS_TYPE); + adapter.dup(); + + for (int count = 0; count < newFactory.getParameterTypes().length; ++count) { + adapter.loadThis(); + adapter.getField(Type.getType(className), "$arg" + count, Type.getType(newFactory.getParameterTypes()[count])); + } + + adapter.loadArgs(); + adapter.invokeConstructor(WriterConstants.CLASS_TYPE, constru); + adapter.returnValue(); + adapter.endMethod(); + + writeNeedsMethods(context.statefulFactoryClazz, writer, 
reserved); + writer.visitEnd(); + + loader.defineFactory(className.replace('/', '.'), writer.toByteArray()); + + return Type.getType(className); + } + + /** + * Generates a factory class that will return script instances or stateful factories. + * Uses the newInstance method from a {@link ScriptContext#factoryClazz} to define the factory method + * to create new instances of the {@link ScriptContext#instanceClazz} or uses the newFactory method + * to create new factories of the {@link ScriptContext#statefulFactoryClazz}. + * @param loader The {@link ClassLoader} that is used to define the factory class and script class. + * @param context The {@link ScriptContext}'s semantics are used to define the factory class. + * @param classType The type to be instaniated in the newFactory or newInstance method. Depends + * on whether a {@link ScriptContext#statefulFactoryClazz} is specified. + * @param The factory class. + * @return A factory class that will return script instances. + */ + private T generateFactory(Loader loader, ScriptContext context, MainMethodReserved reserved, Type classType) { int classFrames = ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS; int classAccess = Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER| Opcodes.ACC_FINAL; String interfaceBase = Type.getType(context.factoryClazz).getInternalName(); @@ -188,28 +299,41 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr writer.visitMethod(Opcodes.ACC_PUBLIC, init.getName(), init.getDescriptor(), null, null)); constructor.visitCode(); constructor.loadThis(); - constructor.loadArgs(); constructor.invokeConstructor(OBJECT_TYPE, init); constructor.returnValue(); constructor.endMethod(); - Method reflect = context.factoryClazz.getMethods()[0]; + Method reflect = null; + + for (Method method : context.factoryClazz.getMethods()) { + if ("newInstance".equals(method.getName())) { + reflect = method; + + break; + } else if ("newFactory".equals(method.getName())) { + reflect = method; + + break; + } + } + org.objectweb.asm.commons.Method instance = new org.objectweb.asm.commons.Method(reflect.getName(), MethodType.methodType(reflect.getReturnType(), reflect.getParameterTypes()).toMethodDescriptorString()); org.objectweb.asm.commons.Method constru = new org.objectweb.asm.commons.Method("", MethodType.methodType(void.class, reflect.getParameterTypes()).toMethodDescriptorString()); GeneratorAdapter adapter = new GeneratorAdapter(Opcodes.ASM5, instance, - writer.visitMethod(Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER | Opcodes.ACC_FINAL, + writer.visitMethod(Opcodes.ACC_PUBLIC | Opcodes.ACC_FINAL, instance.getName(), instance.getDescriptor(), null, null)); adapter.visitCode(); - adapter.newInstance(WriterConstants.CLASS_TYPE); + adapter.newInstance(classType); adapter.dup(); adapter.loadArgs(); - adapter.invokeConstructor(WriterConstants.CLASS_TYPE, constru); + adapter.invokeConstructor(classType, constru); adapter.returnValue(); adapter.endMethod(); + writeNeedsMethods(context.factoryClazz, writer, reserved); writer.visitEnd(); Class factory = loader.defineFactory(className.replace('/', '.'), writer.toByteArray()); @@ -222,6 +346,27 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr } } + private void writeNeedsMethods(Class clazz, ClassWriter writer, MainMethodReserved reserved) { + for (Method method : clazz.getMethods()) { + if (method.getName().startsWith("needs") && + method.getReturnType().equals(boolean.class) && method.getParameterTypes().length == 0) { + String name = 
method.getName(); + name = name.substring(5); + name = Character.toLowerCase(name.charAt(0)) + name.substring(1); + + org.objectweb.asm.commons.Method needs = new org.objectweb.asm.commons.Method(method.getName(), + MethodType.methodType(boolean.class).toMethodDescriptorString()); + + GeneratorAdapter adapter = new GeneratorAdapter(Opcodes.ASM5, needs, + writer.visitMethod(Opcodes.ACC_PUBLIC, needs.getName(), needs.getDescriptor(), null, null)); + adapter.visitCode(); + adapter.push(reserved.getUsedVariables().contains(name)); + adapter.returnValue(); + adapter.endMethod(); + } + } + } + Object compile(Compiler compiler, String scriptName, String source, Map params, Object... args) { final CompilerSettings compilerSettings; @@ -279,7 +424,7 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr @Override public Object run() { String name = scriptName == null ? INLINE_NAME : scriptName; - Constructor constructor = compiler.compile(loader, name, source, compilerSettings); + Constructor constructor = compiler.compile(loader, new MainMethodReserved(), name, source, compilerSettings); try { return constructor.newInstance(args); @@ -295,7 +440,8 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr } } - void compile(Compiler compiler, Loader loader, String scriptName, String source, Map params) { + void compile(Compiler compiler, Loader loader, MainMethodReserved reserved, + String scriptName, String source, Map params) { final CompilerSettings compilerSettings; if (params.isEmpty()) { @@ -341,7 +487,7 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr @Override public Void run() { String name = scriptName == null ? INLINE_NAME : scriptName; - compiler.compile(loader, name, source, compilerSettings); + compiler.compile(loader, reserved, name, source, compilerSettings); return null; } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 6d044dcd916..4aa36ba3714 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -174,11 +174,10 @@ import java.util.List; */ public final class Walker extends PainlessParserBaseVisitor { - public static SSource buildPainlessTree(ScriptClassInfo mainMethod, String sourceName, + public static SSource buildPainlessTree(ScriptClassInfo mainMethod, MainMethodReserved reserved, String sourceName, String sourceText, CompilerSettings settings, Definition definition, Printer debugStream) { - return new Walker(mainMethod, sourceName, sourceText, settings, definition, - debugStream).source; + return new Walker(mainMethod, reserved, sourceName, sourceText, settings, definition, debugStream).source; } private final ScriptClassInfo scriptClassInfo; @@ -193,9 +192,10 @@ public final class Walker extends PainlessParserBaseVisitor { private final Globals globals; private int syntheticCounter = 0; - private Walker(ScriptClassInfo scriptClassInfo, String sourceName, String sourceText, + private Walker(ScriptClassInfo scriptClassInfo, MainMethodReserved reserved, String sourceName, String sourceText, CompilerSettings settings, Definition definition, Printer debugStream) { this.scriptClassInfo = scriptClassInfo; + this.reserved.push(reserved); this.debugStream = debugStream; this.settings = settings; this.sourceName = 
Location.computeSourceName(sourceName, sourceText); @@ -252,8 +252,6 @@ public final class Walker extends PainlessParserBaseVisitor { @Override public ANode visitSource(SourceContext ctx) { - reserved.push(new MainMethodReserved()); - List functions = new ArrayList<>(); for (FunctionContext function : ctx.function()) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java index 257f2975c93..59b7a333cf4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java @@ -135,7 +135,7 @@ public final class SFunction extends AStatement { org.objectweb.asm.commons.Method method = new org.objectweb.asm.commons.Method(name, MethodType.methodType(rtnType.clazz, paramClasses).toMethodDescriptorString()); - this.method = new Method(name, null, false, rtnType, paramTypes, method, Modifier.STATIC | Modifier.PRIVATE, null); + this.method = new Method(name, null, null, rtnType, paramTypes, method, Modifier.STATIC | Modifier.PRIVATE, null); } @Override diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt index a1cde1711bc..0f866799820 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt @@ -36,8 +36,8 @@ class CharSequence -> java.lang.CharSequence { IntStream chars() IntStream codePoints() int length() - String replaceAll*(Pattern,Function) - String replaceFirst*(Pattern,Function) + String org.elasticsearch.painless.api.Augmentation.replaceAll(Pattern,Function) + String org.elasticsearch.painless.api.Augmentation.replaceFirst(Pattern,Function) CharSequence subSequence(int,int) String toString() } @@ -53,17 +53,17 @@ class Iterable -> java.lang.Iterable { Iterator iterator() Spliterator spliterator() # some adaptations of groovy methods - boolean any*(Predicate) - Collection asCollection*() - List asList*() - def each*(Consumer) - def eachWithIndex*(ObjIntConsumer) - boolean every*(Predicate) - List findResults*(Function) - Map groupBy*(Function) - String join*(String) - double sum*() - double sum*(ToDoubleFunction) + boolean org.elasticsearch.painless.api.Augmentation.any(Predicate) + Collection org.elasticsearch.painless.api.Augmentation.asCollection() + List org.elasticsearch.painless.api.Augmentation.asList() + def org.elasticsearch.painless.api.Augmentation.each(Consumer) + def org.elasticsearch.painless.api.Augmentation.eachWithIndex(ObjIntConsumer) + boolean org.elasticsearch.painless.api.Augmentation.every(Predicate) + List org.elasticsearch.painless.api.Augmentation.findResults(Function) + Map org.elasticsearch.painless.api.Augmentation.groupBy(Function) + String org.elasticsearch.painless.api.Augmentation.join(String) + double org.elasticsearch.painless.api.Augmentation.sum() + double org.elasticsearch.painless.api.Augmentation.sum(ToDoubleFunction) } # Readable: i/o @@ -756,8 +756,8 @@ class String -> java.lang.String extends CharSequence,Comparable,Object { boolean contentEquals(CharSequence) String copyValueOf(char[]) String copyValueOf(char[],int,int) - String decodeBase64*() - String encodeBase64*() + String org.elasticsearch.painless.api.Augmentation.decodeBase64() + String 
org.elasticsearch.painless.api.Augmentation.encodeBase64() boolean endsWith(String) boolean equalsIgnoreCase(String) String format(Locale,String,def[]) diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt index aaea78a7a96..4bf1993528b 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.regex.txt @@ -42,7 +42,7 @@ class Matcher -> java.util.regex.Matcher extends Object { boolean find(int) String group() String group(int) - String namedGroup*(String) + String org.elasticsearch.painless.api.Augmentation.namedGroup(String) int groupCount() boolean hasAnchoringBounds() boolean hasTransparentBounds() diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt index 66f8f67d869..ba50a30042c 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.util.txt @@ -41,13 +41,13 @@ class Collection -> java.util.Collection extends Iterable { def[] toArray(def[]) # some adaptations of groovy methods - List collect*(Function) - def collect*(Collection,Function) - def find*(Predicate) - List findAll*(Predicate) - def findResult*(Function) - def findResult*(def,Function) - List split*(Predicate) + List org.elasticsearch.painless.api.Augmentation.collect(Function) + def org.elasticsearch.painless.api.Augmentation.collect(Collection,Function) + def org.elasticsearch.painless.api.Augmentation.find(Predicate) + List org.elasticsearch.painless.api.Augmentation.findAll(Predicate) + def org.elasticsearch.painless.api.Augmentation.findResult(Function) + def org.elasticsearch.painless.api.Augmentation.findResult(def,Function) + List org.elasticsearch.painless.api.Augmentation.split(Predicate) } class Comparator -> java.util.Comparator { @@ -123,7 +123,7 @@ class List -> java.util.List extends Collection,Iterable { def remove(int) void replaceAll(UnaryOperator) def set(int,def) - int getLength*() + int org.elasticsearch.painless.api.Augmentation.getLength() void sort(Comparator) List subList(int,int) } @@ -163,17 +163,17 @@ class Map -> java.util.Map { Collection values() # some adaptations of groovy methods - List collect*(BiFunction) - def collect*(Collection,BiFunction) - int count*(BiPredicate) - def each*(BiConsumer) - boolean every*(BiPredicate) - Map.Entry find*(BiPredicate) - Map findAll*(BiPredicate) - def findResult*(BiFunction) - def findResult*(def,BiFunction) - List findResults*(BiFunction) - Map groupBy*(BiFunction) + List org.elasticsearch.painless.api.Augmentation.collect(BiFunction) + def org.elasticsearch.painless.api.Augmentation.collect(Collection,BiFunction) + int org.elasticsearch.painless.api.Augmentation.count(BiPredicate) + def org.elasticsearch.painless.api.Augmentation.each(BiConsumer) + boolean org.elasticsearch.painless.api.Augmentation.every(BiPredicate) + Map.Entry org.elasticsearch.painless.api.Augmentation.find(BiPredicate) + Map org.elasticsearch.painless.api.Augmentation.findAll(BiPredicate) + def org.elasticsearch.painless.api.Augmentation.findResult(BiFunction) + def org.elasticsearch.painless.api.Augmentation.findResult(def,BiFunction) + List 
+  List org.elasticsearch.painless.api.Augmentation.findResults(BiFunction)
+  Map org.elasticsearch.painless.api.Augmentation.groupBy(BiFunction)
 }
 
 class Map.Entry -> java.util.Map$Entry {
diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt
index ce78f8a6315..94ccc701331 100644
--- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt
+++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt
@@ -156,6 +156,8 @@ class org.elasticsearch.painless.FeatureTest -> org.elasticsearch.painless.Featu
   boolean overloadedStatic(boolean)
   Object twoFunctionsOfX(Function,Function)
   void listInput(List)
+  int org.elasticsearch.painless.FeatureTestAugmentation.getTotal()
+  int org.elasticsearch.painless.FeatureTestAugmentation.addToTotal(int)
 }
 
 class org.elasticsearch.search.lookup.FieldLookup -> org.elasticsearch.search.lookup.FieldLookup extends Object {
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java
index acf698e2fc7..8618194028b 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AugmentationTests.java
@@ -188,4 +188,15 @@ public class AugmentationTests extends ScriptTestCase {
             exec("Map m = new TreeMap(); m.a = -1; m.b = 1; " +
                  "return m.groupBy((key,value) -> value < 0 ? 'negative' : 'positive')"));
     }
+
+    public void testFeatureTest() {
+        assertEquals(5, exec("org.elasticsearch.painless.FeatureTest ft = new org.elasticsearch.painless.FeatureTest();" +
+            " ft.setX(3); ft.setY(2); return ft.getTotal()"));
+        assertEquals(5, exec("def ft = new org.elasticsearch.painless.FeatureTest();" +
+            " ft.setX(3); ft.setY(2); return ft.getTotal()"));
+        assertEquals(8, exec("org.elasticsearch.painless.FeatureTest ft = new org.elasticsearch.painless.FeatureTest();" +
+            " ft.setX(3); ft.setY(2); return ft.addToTotal(3)"));
+        assertEquals(8, exec("def ft = new org.elasticsearch.painless.FeatureTest();" +
+            " ft.setX(3); ft.setY(2); return ft.addToTotal(3)"));
+    }
 }
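The new org.elasticsearch.txt entries and the testFeatureTest cases above refer to org.elasticsearch.painless.FeatureTestAugmentation, which does not appear in this part of the diff. As a hedged sketch only (signatures and arithmetic inferred from the whitelist lines and the test expectations, not copied from the PR), a Painless augmentation is a plain class of static methods whose first parameter is the whitelisted receiver type:

    // Hypothetical sketch, not part of this diff: in a script, ft.getTotal() dispatches to
    // getTotal(ft) and ft.addToTotal(n) dispatches to addToTotal(ft, n).
    public class FeatureTestAugmentation {
        public static int getTotal(FeatureTest receiver) {
            return receiver.getX() + receiver.getY();   // setX(3); setY(2) -> 5, as the test expects
        }

        public static int addToTotal(FeatureTest receiver, int add) {
            return getTotal(receiver) + add;            // 5 + 3 -> 8, as the test expects
        }

        private FeatureTestAugmentation() {}
    }
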
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java
index 2717d20b1f4..23362265474 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FactoryTests.java
@@ -30,6 +30,7 @@ public class FactoryTests extends ScriptTestCase {
     protected Collection<ScriptContext<?>> scriptContexts() {
         Collection<ScriptContext<?>> contexts = super.scriptContexts();
+        contexts.add(StatefulFactoryTestScript.CONTEXT);
         contexts.add(FactoryTestScript.CONTEXT);
         contexts.add(EmptyTestScript.CONTEXT);
         contexts.add(TemplateScript.CONTEXT);
@@ -37,6 +38,89 @@
         return contexts;
     }
 
+    public abstract static class StatefulFactoryTestScript {
+        private final int x;
+        private final int y;
+
+        public StatefulFactoryTestScript(int x, int y, int a, int b) {
+            this.x = x*a;
+            this.y = y*b;
+        }
+
+        public int getX() {
+            return x;
+        }
+
+        public int getY() {
+            return y*2;
+        }
+
+        public int getC() {
+            return -1;
+        }
+
+        public int getD() {
+            return 2;
+        }
+
+        public static final String[] PARAMETERS = new String[] {"test"};
+        public abstract Object execute(int test);
+
+        public abstract boolean needsTest();
+        public abstract boolean needsNothing();
+        public abstract boolean needsX();
+        public abstract boolean needsC();
+        public abstract boolean needsD();
+
+        public interface StatefulFactory {
+            StatefulFactoryTestScript newInstance(int a, int b);
+
+            boolean needsTest();
+            boolean needsNothing();
+            boolean needsX();
+            boolean needsC();
+            boolean needsD();
+        }
+
+        public interface Factory {
+            StatefulFactory newFactory(int x, int y);
+
+            boolean needsTest();
+            boolean needsNothing();
+            boolean needsX();
+            boolean needsC();
+            boolean needsD();
+        }
+
+        public static final ScriptContext<StatefulFactoryTestScript.Factory> CONTEXT =
+            new ScriptContext<>("test", StatefulFactoryTestScript.Factory.class);
+    }
+
+    public void testStatefulFactory() {
+        StatefulFactoryTestScript.Factory factory = scriptEngine.compile(
+            "stateful_factory_test", "test + x + y + d", StatefulFactoryTestScript.CONTEXT, Collections.emptyMap());
+        StatefulFactoryTestScript.StatefulFactory statefulFactory = factory.newFactory(1, 2);
+        StatefulFactoryTestScript script = statefulFactory.newInstance(3, 4);
+        assertEquals(24, script.execute(3));
+        statefulFactory.newInstance(5, 6);
+        assertEquals(28, script.execute(7));
+        assertEquals(true, script.needsTest());
+        assertEquals(false, script.needsNothing());
+        assertEquals(true, script.needsX());
+        assertEquals(false, script.needsC());
+        assertEquals(true, script.needsD());
+        assertEquals(true, statefulFactory.needsTest());
+        assertEquals(false, statefulFactory.needsNothing());
+        assertEquals(true, statefulFactory.needsX());
+        assertEquals(false, statefulFactory.needsC());
+        assertEquals(true, statefulFactory.needsD());
+        assertEquals(true, factory.needsTest());
+        assertEquals(false, factory.needsNothing());
+        assertEquals(true, factory.needsX());
+        assertEquals(false, factory.needsC());
+        assertEquals(true, factory.needsD());
+    }
+
     public abstract static class FactoryTestScript {
         private final Map<String, Object> params;
 
@@ -53,6 +137,9 @@ public class FactoryTests extends ScriptTestCase {
 
         public interface Factory {
             FactoryTestScript newInstance(Map<String, Object> params);
+
+            boolean needsTest();
+            boolean needsNothing();
         }
 
         public static final ScriptContext<FactoryTestScript.Factory> CONTEXT =
@@ -68,6 +155,8 @@
         script = factory.newInstance(Collections.singletonMap("test", 3));
         assertEquals(5, script.execute(2));
         assertEquals(2, script.execute(-1));
+        assertEquals(true, factory.needsTest());
+        assertEquals(false, factory.needsNothing());
     }
 
     public abstract static class EmptyTestScript {
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java
index 021cb311869..cae7a0e4291 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/NeedsScoreTests.java
@@ -44,19 +44,19 @@ public class NeedsScoreTests extends ESSingleNodeTestCase {
         SearchScript.Factory factory = service.compile(null, "1.2", SearchScript.CONTEXT, Collections.emptyMap());
         SearchScript.LeafFactory ss = factory.newFactory(Collections.emptyMap(), lookup);
-        assertFalse(ss.needsScores());
+        assertFalse(ss.needs_score());
 
         factory = service.compile(null, "doc['d'].value", SearchScript.CONTEXT, Collections.emptyMap());
         ss = factory.newFactory(Collections.emptyMap(), lookup);
-        assertFalse(ss.needsScores());
+        
assertFalse(ss.needs_score()); factory = service.compile(null, "1/_score", SearchScript.CONTEXT, Collections.emptyMap()); ss = factory.newFactory(Collections.emptyMap(), lookup); - assertTrue(ss.needsScores()); + assertTrue(ss.needs_score()); factory = service.compile(null, "doc['d'].value * _score", SearchScript.CONTEXT, Collections.emptyMap()); ss = factory.newFactory(Collections.emptyMap(), lookup); - assertTrue(ss.needsScores()); + assertTrue(ss.needs_score()); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java index 910c4940ab5..c29260163c0 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java @@ -164,7 +164,7 @@ public class PainlessDocGenerator { emitAnchor(stream, method); stream.print("]]"); - if (false == method.augmentation && Modifier.isStatic(method.modifiers)) { + if (null == method.augmentation && Modifier.isStatic(method.modifiers)) { stream.print("static "); } @@ -268,12 +268,12 @@ public class PainlessDocGenerator { stream.print("link:{"); stream.print(root); stream.print("-javadoc}/"); - stream.print((method.augmentation ? Augmentation.class : method.owner.clazz).getName().replace('.', '/')); + stream.print((method.augmentation != null ? method.augmentation : method.owner.clazz).getName().replace('.', '/')); stream.print(".html#"); stream.print(methodName(method)); stream.print("%2D"); boolean first = true; - if (method.augmentation) { + if (method.augmentation != null) { first = false; stream.print(method.owner.clazz.getName()); } @@ -309,7 +309,7 @@ public class PainlessDocGenerator { * Pick the javadoc root for a {@link Method}. 
*/ private static String javadocRoot(Method method) { - if (method.augmentation) { + if (method.augmentation != null) { return "painless"; } return javadocRoot(method.owner); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 24cfd29547a..2e99f652c0a 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -20,7 +20,6 @@ package org.elasticsearch.painless; import junit.framework.AssertionFailedError; - import org.apache.lucene.search.Scorer; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.common.settings.Settings; @@ -37,6 +36,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.painless.node.SSource.MainMethodReserved; import static org.hamcrest.Matchers.hasSize; /** @@ -96,8 +96,7 @@ public abstract class ScriptTestCase extends ESTestCase { CompilerSettings pickySettings = new CompilerSettings(); pickySettings.setPicky(true); pickySettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(scriptEngineSettings())); - Walker.buildPainlessTree(scriptClassInfo, getTestName(), script, pickySettings, - definition, null); + Walker.buildPainlessTree(scriptClassInfo, new MainMethodReserved(), getTestName(), script, pickySettings, definition, null); } // test actual script execution ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, compileParams); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java index 7d3115fdb5e..ee208991a79 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java @@ -31,8 +31,8 @@ import org.elasticsearch.painless.FeatureTest; import org.elasticsearch.painless.GenericElasticsearchScript; import org.elasticsearch.painless.Locals.Variable; import org.elasticsearch.painless.Location; -import org.elasticsearch.painless.ScriptClassInfo; import org.elasticsearch.painless.Operation; +import org.elasticsearch.painless.ScriptClassInfo; import org.elasticsearch.painless.antlr.Walker; import org.elasticsearch.test.ESTestCase; @@ -42,6 +42,7 @@ import java.util.Map; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; +import static org.elasticsearch.painless.node.SSource.MainMethodReserved; /** * Tests {@link Object#toString} implementations on all extensions of {@link ANode}. 
@@ -902,8 +903,8 @@ public class NodeToStringTests extends ESTestCase { CompilerSettings compilerSettings = new CompilerSettings(); compilerSettings.setRegexesEnabled(true); try { - return Walker.buildPainlessTree(scriptClassInfo, getTestName(), code, compilerSettings, - definition, null); + return Walker.buildPainlessTree( + scriptClassInfo, new MainMethodReserved(), getTestName(), code, compilerSettings, definition, null); } catch (Exception e) { throw new AssertionError("Failed to compile: " + code, e); } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/fetch/ParentJoinFieldSubFetchPhase.java b/modules/parent-join/src/main/java/org/elasticsearch/join/fetch/ParentJoinFieldSubFetchPhase.java index 2eb0ddc3fe1..583c2707ec3 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/fetch/ParentJoinFieldSubFetchPhase.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/fetch/ParentJoinFieldSubFetchPhase.java @@ -23,10 +23,9 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.join.mapper.ParentIdFieldMapper; import org.elasticsearch.join.mapper.ParentJoinFieldMapper; -import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.SearchContext; @@ -62,14 +61,14 @@ public final class ParentJoinFieldSubFetchPhase implements FetchSubPhase { parentId = getSortedDocValue(parentMapper.name(), hitContext.reader(), hitContext.docId()); } - Map fields = hitContext.hit().fieldsOrNull(); + Map fields = hitContext.hit().fieldsOrNull(); if (fields == null) { fields = new HashMap<>(); hitContext.hit().fields(fields); } - fields.put(mapper.name(), new SearchHitField(mapper.name(), Collections.singletonList(joinName))); + fields.put(mapper.name(), new DocumentField(mapper.name(), Collections.singletonList(joinName))); if (parentId != null) { - fields.put(parentMapper.name(), new SearchHitField(parentMapper.name(), Collections.singletonList(parentId))); + fields.put(parentMapper.name(), new DocumentField(parentMapper.name(), Collections.singletonList(parentId))); } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java index ecabbb096ea..b5408d2123b 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -162,7 +163,7 @@ public final class ParentJoinFieldMapper extends FieldMapper { checkParentFields(name(), parentIdFields); MetaJoinFieldMapper unique = new MetaJoinFieldMapper.Builder().build(context); return new ParentJoinFieldMapper(name, fieldType, context.indexSettings(), - unique, 
Collections.unmodifiableList(parentIdFields));
+                unique, Collections.unmodifiableList(parentIdFields), eagerGlobalOrdinals);
         }
     }
 
@@ -183,15 +184,21 @@
                 iterator.remove();
                 continue;
             }
-            final String parent = entry.getKey();
-            Set<String> children;
-            if (XContentMapValues.isArray(entry.getValue())) {
-                children = new HashSet<>(Arrays.asList(XContentMapValues.nodeStringArrayValue(entry.getValue())));
-            } else {
-                children = Collections.singleton(entry.getValue().toString());
+            if ("relations".equals(entry.getKey())) {
+                Map<String, Object> relations = XContentMapValues.nodeMapValue(entry.getValue(), "relations");
+                for (Iterator<Map.Entry<String, Object>> relIt = relations.entrySet().iterator(); relIt.hasNext(); ) {
+                    Map.Entry<String, Object> relation = relIt.next();
+                    final String parent = relation.getKey();
+                    Set<String> children;
+                    if (XContentMapValues.isArray(relation.getValue())) {
+                        children = new HashSet<>(Arrays.asList(XContentMapValues.nodeStringArrayValue(relation.getValue())));
+                    } else {
+                        children = Collections.singleton(relation.getValue().toString());
+                    }
+                    builder.addParent(parent, children);
+                }
+                iterator.remove();
             }
-            builder.addParent(parent, children);
-            iterator.remove();
         }
         return builder;
     }
@@ -235,16 +242,19 @@
     // The meta field that ensures that there is no other parent-join in the mapping
     private MetaJoinFieldMapper uniqueFieldMapper;
     private List<ParentIdFieldMapper> parentIdFields;
+    private boolean eagerGlobalOrdinals;
 
     protected ParentJoinFieldMapper(String simpleName,
                                     MappedFieldType fieldType,
                                     Settings indexSettings,
                                     MetaJoinFieldMapper uniqueFieldMapper,
-                                    List<ParentIdFieldMapper> parentIdFields) {
+                                    List<ParentIdFieldMapper> parentIdFields,
+                                    boolean eagerGlobalOrdinals) {
         super(simpleName, fieldType, Defaults.FIELD_TYPE, indexSettings, MultiFields.empty(), null);
         this.parentIdFields = parentIdFields;
         this.uniqueFieldMapper = uniqueFieldMapper;
         this.uniqueFieldMapper.setFieldMapper(this);
+        this.eagerGlobalOrdinals = eagerGlobalOrdinals;
     }
 
     @Override
@@ -337,6 +347,7 @@
         if (conflicts.isEmpty() == false) {
             throw new IllegalStateException("invalid update for join field [" + name() + "]:\n" + conflicts.toString());
         }
+        this.eagerGlobalOrdinals = joinMergeWith.eagerGlobalOrdinals;
         this.parentIdFields = Collections.unmodifiableList(newParentIdFields);
         this.uniqueFieldMapper = (MetaJoinFieldMapper) uniqueFieldMapper.merge(joinMergeWith.uniqueFieldMapper, updateAllTypes);
         uniqueFieldMapper.setFieldMapper(this);
@@ -423,6 +434,8 @@
     @Override
     protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
         builder.field("type", contentType());
+        builder.field("eager_global_ordinals", eagerGlobalOrdinals);
+        builder.startObject("relations");
         for (ParentIdFieldMapper field : parentIdFields) {
             if (field.getChildren().size() == 1) {
                 builder.field(field.getParentName(), field.getChildren().iterator().next());
@@ -430,6 +443,7 @@
                 builder.field(field.getParentName(), field.getChildren());
             }
         }
+        builder.endObject();
     }
 }
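With the doXContentBody change above, a join field now always serializes eager_global_ordinals and nests its parent/child pairs inside a dedicated "relations" object, which is exactly the shape the new parsing loop accepts (a single child as a string, several children as an array). A minimal sketch of the resulting mapping, written with the same XContentBuilder calls the updated tests use (the field and relation names here are invented, not taken from the PR):

    // Illustrative fragment only; assumes the usual XContentFactory/XContentBuilder imports.
    XContentBuilder mapping = XContentFactory.jsonBuilder()
        .startObject()
            .startObject("properties")
                .startObject("join_field")
                    .field("type", "join")
                    .field("eager_global_ordinals", true)
                    .startObject("relations")
                        .field("question", "answer")            // one parent with a single child
                        .array("parent", "child_a", "child_b")  // one parent with several children
                    .endObject()
                .endObject()
            .endObject()
        .endObject();

This replaces the earlier shape, removed in the tests below, where each parent/child pair was written directly next to "type": "join" on the field itself.
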
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java
index 61f050bff3f..cf3d0207bb7 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java
@@ -32,6 +32,7 @@ import org.apache.lucene.search.TopFieldCollector;
 import org.apache.lucene.search.TopScoreDocCollector;
 import org.apache.lucene.search.TotalHitCountCollector;
 import org.apache.lucene.search.Weight;
+import org.elasticsearch.common.document.DocumentField;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.IdFieldMapper;
@@ -44,7 +45,6 @@ import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.join.mapper.ParentIdFieldMapper;
 import org.elasticsearch.join.mapper.ParentJoinFieldMapper;
 import org.elasticsearch.search.SearchHit;
-import org.elasticsearch.search.SearchHitField;
 import org.elasticsearch.search.fetch.subphase.InnerHitsContext;
 import org.elasticsearch.search.internal.SearchContext;
 
@@ -126,7 +126,7 @@
             TopDocs[] result = new TopDocs[hits.length];
             for (int i = 0; i < hits.length; i++) {
                 SearchHit hit = hits[i];
-                SearchHitField joinField = hit.getFields().get(joinFieldMapper.name());
+                DocumentField joinField = hit.getFields().get(joinFieldMapper.name());
                 if (joinField == null) {
                     result[i] = Lucene.EMPTY_TOP_DOCS;
                     continue;
@@ -150,7 +150,7 @@
                         .add(joinFieldMapper.fieldType().termQuery(typeName, qsc), BooleanClause.Occur.FILTER)
                         .build();
                 } else {
-                    SearchHitField parentIdField = hit.getFields().get(parentIdFieldMapper.name());
+                    DocumentField parentIdField = hit.getFields().get(parentIdFieldMapper.name());
                     q = context.mapperService().fullName(IdFieldMapper.NAME).termQuery(parentIdField.getValue(), qsc);
                 }
 
@@ -206,7 +206,7 @@
             } else if (isChildHit(hit)) {
                 DocumentMapper hitDocumentMapper = mapperService.documentMapper(hit.getType());
                 final String parentType = hitDocumentMapper.parentFieldMapper().type();
-                SearchHitField parentField = hit.field(ParentFieldMapper.NAME);
+                DocumentField parentField = hit.field(ParentFieldMapper.NAME);
                 if (parentField == null) {
                     throw new IllegalStateException("All children must have a _parent");
                 }
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/spi/ParentJoinNamedXContentProvider.java b/modules/parent-join/src/main/java/org/elasticsearch/join/spi/ParentJoinNamedXContentProvider.java
new file mode 100644
index 00000000000..25024101461
--- /dev/null
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/spi/ParentJoinNamedXContentProvider.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.join.spi; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ContextParser; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder; +import org.elasticsearch.join.aggregations.ParsedChildren; +import org.elasticsearch.plugins.spi.NamedXContentProvider; +import org.elasticsearch.search.aggregations.Aggregation; + +import java.util.List; + +import static java.util.Collections.singletonList; + +public class ParentJoinNamedXContentProvider implements NamedXContentProvider { + + @Override + public List getNamedXContentParsers() { + ParseField parseField = new ParseField(ChildrenAggregationBuilder.NAME); + ContextParser contextParser = (p, name) -> ParsedChildren.fromXContent(p, (String) name); + return singletonList(new NamedXContentRegistry.Entry(Aggregation.class, parseField, contextParser)); + } +} diff --git a/modules/parent-join/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider b/modules/parent-join/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider new file mode 100644 index 00000000000..48687c21c32 --- /dev/null +++ b/modules/parent-join/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider @@ -0,0 +1 @@ +org.elasticsearch.join.spi.ParentJoinNamedXContentProvider \ No newline at end of file diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java index 59467d49c82..12bb2f700e3 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.join.aggregations; import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.update.UpdateResponse; @@ -59,8 +60,11 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; public class ChildrenIT extends ParentChildTestCase { + + private static final Map categoryToControl = new HashMap<>(); + @Before public void setupCluster() throws Exception { categoryToControl.clear(); @@ -73,8 +77,9 @@ public class ChildrenIT extends ParentChildTestCase { } else { assertAcked( prepareCreate("test") - .addMapping("doc", "category", "type=keyword", "join_field", "type=join,article=comment", - "commenter", "type=keyword") + .addMapping("doc", + addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "article", "comment"), + "commenter", "keyword", "category", "keyword")) ); } @@ -248,7 +253,9 @@ public class ChildrenIT extends ParentChildTestCase { } else { assertAcked( prepareCreate(indexName) - .addMapping("doc", "join_field", "type=join,parent=child", "count", "type=long") + .addMapping("doc", + addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"), + "name", "keyword")) ); } @@ -318,17 +325,19 @@ public class ChildrenIT extends ParentChildTestCase { prepareCreate(indexName) 
.setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.mapping.single_type", false)) + .put("index.version.created", Version.V_5_6_0)) // multi type .addMapping(masterType, "brand", "type=text", "name", "type=keyword", "material", "type=text") .addMapping(childType, "_parent", "type=masterprod", "color", "type=keyword", "size", "type=keyword") ); } else { assertAcked( prepareCreate(indexName) - .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)) - .addMapping("doc", "join_field", "type=join," + masterType + "=" + childType, "brand", "type=text", - "name", "type=keyword", "material", "type=text", "color", "type=keyword", "size", "type=keyword") + .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)) + .addMapping("doc", + addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, + masterType, childType), + "brand", "text", "name", "keyword", "material", "text", "color", "keyword", "size", "keyword")) ); } @@ -391,7 +400,7 @@ public class ChildrenIT extends ParentChildTestCase { assertAcked( prepareCreate(indexName) .setSettings(Settings.builder() - .put("index.mapping.single_type", false) + .put("index.version.created", Version.V_5_6_0) // multi type ).addMapping(grandParentType, "name", "type=keyword") .addMapping(parentType, "_parent", "type=" + grandParentType) .addMapping(childType, "_parent", "type=" + parentType) @@ -400,8 +409,10 @@ public class ChildrenIT extends ParentChildTestCase { } else { assertAcked( prepareCreate(indexName) - .addMapping("doc", "join_field", "type=join," + grandParentType + "=" + parentType + "," + - parentType + "=" + childType, "name", "type=keyword") + .addMapping("doc", + addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, + grandParentType, parentType, parentType, childType), + "name", "keyword")) ); } @@ -449,8 +460,10 @@ public class ChildrenIT extends ParentChildTestCase { } else { assertAcked( prepareCreate("index") - .addMapping("doc", "join_field", "type=join,parentType=childType", "name", "type=keyword", - "town", "type=keyword", "age", "type=integer") + .addMapping("doc", + addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, + "parentType", "childType"), + "name", "keyword", "town", "keyword", "age", "integer")) ); } List requests = new ArrayList<>(); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/fetch/ParentJoinFieldSubFetchPhaseTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/fetch/ParentJoinFieldSubFetchPhaseTests.java index 7eb2c8f3576..72bb1629cad 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/fetch/ParentJoinFieldSubFetchPhaseTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/fetch/ParentJoinFieldSubFetchPhaseTests.java @@ -49,9 +49,11 @@ public class ParentJoinFieldSubFetchPhaseTests extends ESSingleNodeTestCase { .startObject("properties") .startObject("join_field") .field("type", "join") - .field("parent", "child") - .field("child", "grand_child") - .field("product", "item") + .startObject("relations") + .field("parent", "child") + .field("child", "grand_child") + .field("product", "item") + .endObject() .endObject() .endObject() .endObject().string(); diff --git 
a/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java index d1b726a02de..068f39d5971 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/mapper/ParentJoinFieldMapperTests.java @@ -50,7 +50,9 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties") .startObject("join_field") .field("type", "join") - .field("parent", "child") + .startObject("relations") + .field("parent", "child") + .endObject() .endObject() .endObject() .endObject().string(); @@ -97,8 +99,10 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties") .startObject("join_field") .field("type", "join") - .field("parent", "child") - .field("child", "grand_child") + .startObject("relations") + .field("parent", "child") + .field("child", "grand_child") + .endObject() .endObject() .endObject() .endObject().string(); @@ -176,8 +180,10 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { String mapping = XContentFactory.jsonBuilder().startObject().startObject("properties") .startObject("join_field") .field("type", "join") - .field("parent", "child") - .array("child", "grand_child1", "grand_child2") + .startObject("relations") + .field("parent", "child") + .array("child", "grand_child1", "grand_child2") + .endObject() .endObject() .endObject().endObject().string(); IndexService indexService = createIndex("test"); @@ -189,7 +195,9 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { final String updateMapping = XContentFactory.jsonBuilder().startObject().startObject("properties") .startObject("join_field") .field("type", "join") - .array("child", "grand_child1", "grand_child2") + .startObject("relations") + .array("child", "grand_child1", "grand_child2") + .endObject() .endObject() .endObject().endObject().string(); IllegalStateException exc = expectThrows(IllegalStateException.class, @@ -202,8 +210,10 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { final String updateMapping = XContentFactory.jsonBuilder().startObject().startObject("properties") .startObject("join_field") .field("type", "join") - .field("parent", "child") - .field("child", "grand_child1") + .startObject("relations") + .field("parent", "child") + .field("child", "grand_child1") + .endObject() .endObject() .endObject().endObject().string(); IllegalStateException exc = expectThrows(IllegalStateException.class, @@ -216,9 +226,11 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { final String updateMapping = XContentFactory.jsonBuilder().startObject().startObject("properties") .startObject("join_field") .field("type", "join") - .field("uber_parent", "parent") - .field("parent", "child") - .array("child", "grand_child1", "grand_child2") + .startObject("relations") + .field("uber_parent", "parent") + .field("parent", "child") + .array("child", "grand_child1", "grand_child2") + .endObject() .endObject() .endObject().endObject().string(); IllegalStateException exc = expectThrows(IllegalStateException.class, @@ -230,10 +242,12 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { { final String updateMapping = XContentFactory.jsonBuilder().startObject().startObject("properties") .startObject("join_field") - .field("type", 
"join") - .field("parent", "child") - .array("child", "grand_child1", "grand_child2") - .field("grand_child2", "grand_grand_child") + .field("type", "join") + .startObject("relations") + .field("parent", "child") + .array("child", "grand_child1", "grand_child2") + .field("grand_child2", "grand_grand_child") + .endObject() .endObject() .endObject().endObject().string(); IllegalStateException exc = expectThrows(IllegalStateException.class, @@ -246,8 +260,10 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { final String updateMapping = XContentFactory.jsonBuilder().startObject().startObject("properties") .startObject("join_field") .field("type", "join") - .array("parent", "child", "child2") - .array("child", "grand_child1", "grand_child2") + .startObject("relations") + .array("parent", "child", "child2") + .array("child", "grand_child1", "grand_child2") + .endObject() .endObject() .endObject().endObject().string(); docMapper = indexService.mapperService().merge("type", new CompressedXContent(updateMapping), @@ -264,9 +280,11 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { final String updateMapping = XContentFactory.jsonBuilder().startObject().startObject("properties") .startObject("join_field") .field("type", "join") - .array("parent", "child", "child2") - .array("child", "grand_child1", "grand_child2") - .array("other", "child_other1", "child_other2") + .startObject("relations") + .array("parent", "child", "child2") + .array("child", "grand_child1", "grand_child2") + .array("other", "child_other1", "child_other2") + .endObject() .endObject() .endObject().endObject().string(); docMapper = indexService.mapperService().merge("type", new CompressedXContent(updateMapping), @@ -288,7 +306,9 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties") .startObject("join_field") .field("type", "join") - .field("parent", "child") + .startObject("relations") + .field("parent", "child") + .endObject() .endObject() .endObject() .endObject() @@ -308,7 +328,9 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { .startObject("fields") .startObject("join_field") .field("type", "join") - .field("parent", "child") + .startObject("relations") + .field("parent", "child") + .endObject() .endObject() .endObject() .endObject() @@ -328,12 +350,16 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties") .startObject("join_field") .field("type", "join") - .field("parent", "child") - .field("child", "grand_child") + .startObject("relations") + .field("parent", "child") + .field("child", "grand_child") + .endObject() .endObject() .startObject("another_join_field") .field("type", "join") - .field("product", "item") + .startObject("relations") + .field("product", "item") + .endObject() .endObject() .endObject() .endObject().string(); @@ -347,8 +373,10 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties") .startObject("join_field") .field("type", "join") - .field("parent", "child") - .field("child", "grand_child") + .startObject("relations") + .field("parent", "child") + .field("child", "grand_child") + .endObject() .endObject() .endObject() .endObject().string(); @@ -372,8 +400,10 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { .startObject("properties") .startObject("join_field") .field("type", "join") - .field("parent", "child") - .field("child", "grand_child") + .startObject("relations") + 
.field("parent", "child") + .field("child", "grand_child") + .endObject() .endObject() .endObject() .endObject().string(); @@ -392,12 +422,14 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase { .startObject("join_field") .field("type", "join") .field("eager_global_ordinals", false) - .field("parent", "child") - .field("child", "grand_child") + .startObject("relations") + .field("parent", "child") + .field("child", "grand_child") + .endObject() .endObject() .endObject() .endObject().string(); - docMapper = service.mapperService().merge("type", new CompressedXContent(mapping), + service.mapperService().merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false); assertFalse(service.mapperService().fullName("join_field").eagerGlobalOrdinals()); assertNotNull(service.mapperService().fullName("join_field#parent")); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java index e7a57328b6e..14503086ab2 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.IdsQueryBuilder; import org.elasticsearch.index.query.InnerHitBuilder; @@ -102,7 +101,8 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("grandchild", "_parent", "type=child")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child,child=grandchild")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, + "parent", "child", "child", "grandchild"))); } ensureGreen(); @@ -164,7 +164,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("test", "_parent", "type=foo")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,foo=test")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "foo", "test"))); } ensureGreen(); @@ -188,7 +188,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -295,7 +295,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); List builders = new ArrayList<>(); @@ -339,7 +339,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + 
.addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); Map> parentToChildren = new HashMap<>(); @@ -393,7 +393,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -467,7 +467,9 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent", "c_field", "type=keyword")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child", "c_field", "type=keyword")); + .addMapping("doc", + addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"), + "c_field", "keyword"))); } ensureGreen(); @@ -511,7 +513,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); // index simple data @@ -551,7 +553,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -584,7 +586,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -613,7 +615,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -650,7 +652,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -724,7 +726,9 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("doc", jsonBuilder().startObject().startObject("doc").startObject("properties") .startObject("join_field") .field("type", "join") - .field("parent", new String[] {"child", "child1"}) + .startObject("relations") + .field("parent", new String[] {"child", "child1"}) + .endObject() .endObject() .endObject().endObject().endObject() )); @@ -818,7 +822,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", 
buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -863,7 +867,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -893,7 +897,6 @@ public class ChildQuerySearchIT extends ParentChildTestCase { assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("2")); } - @AwaitsFix(bugUrl = "wait for inner hits to be fixed") public void testHasChildInnerHitsHighlighting() throws Exception { if (legacy()) { assertAcked(prepareCreate("test") @@ -901,7 +904,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -931,7 +934,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -969,7 +972,8 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent", "c_field", "type=keyword")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child", "p_field", "type=keyword", "c_field", "type=keyword")); + .addMapping("doc", addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"), + "c_field", "keyword", "p_field", "keyword"))); } ensureGreen(); @@ -1022,7 +1026,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -1089,7 +1093,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -1124,7 +1128,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { } else { assertAcked(prepareCreate("test") .setSettings("index.refresh_interval", -1) - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -1195,7 +1199,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .put(indexSettings()) .put("index.refresh_interval", -1) ) - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -1223,7 +1227,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", 
"_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -1294,9 +1298,11 @@ public class ChildQuerySearchIT extends ParentChildTestCase { assertAcked(prepareCreate("grandissue") .addMapping("doc", jsonBuilder().startObject().startObject("doc").startObject("properties") .startObject("join_field") - .field("type", "join") - .field("grandparent", "parent") - .field("parent", new String[] {"child_type_one", "child_type_two"}) + .field("type", "join") + .startObject("relations") + .field("grandparent", "parent") + .field("parent", new String[] {"child_type_one", "child_type_two"}) + .endObject() .endObject() .endObject().endObject().endObject() )); @@ -1350,7 +1356,9 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child", "objects", "type=nested")); + .addMapping("doc", + addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"), + "objects", "nested"))); } ensureGreen(); @@ -1396,7 +1404,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -1503,7 +1511,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { } else { assertAcked(prepareCreate("test") .setSettings("index.refresh_interval", -1) - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -1551,7 +1559,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); for (int i = 0; i < 10; i++) { @@ -1599,7 +1607,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -1683,7 +1691,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } ensureGreen(); @@ -2001,7 +2009,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { .addMapping("parent-type").addMapping("child-type", "_parent", "type=parent-type")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent-type=child-type")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent-type", "child-type"))); } 
createIndexRequest("test", "child-type", "child-id", "parent-id").get(); createIndexRequest("test", "parent-type", "parent-id", null).get(); @@ -2017,7 +2025,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { assertSearchHits(searchResponse, "child-id"); } - public void testHighlightersIgnoreParentChild() { + public void testHighlightersIgnoreParentChild() throws IOException { if (legacy()) { assertAcked(prepareCreate("test") .addMapping("parent-type", "searchText", "type=text,term_vector=with_positions_offsets,index_options=offsets") @@ -2025,8 +2033,20 @@ public class ChildQuerySearchIT extends ParentChildTestCase { "type=text,term_vector=with_positions_offsets,index_options=offsets")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent-type=child-type", - "searchText", "type=text,term_vector=with_positions_offsets,index_options=offsets")); + .addMapping("doc", jsonBuilder().startObject().startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent-type", "child-type") + .endObject() + .endObject() + .startObject("searchText") + .field("type", "text") + .field("term_vector", "with_positions_offsets") + .field("index_options", "offsets") + .endObject() + .endObject().endObject() + )); } createIndexRequest("test", "parent-type", "parent-id", null, "searchText", "quick brown fox").get(); createIndexRequest("test", "child-type", "child-id", "parent-id", "searchText", "quick brown fox").get(); @@ -2069,8 +2089,7 @@ public class ChildQuerySearchIT extends ParentChildTestCase { ); } else { assertAcked(prepareCreate("my-index") - .addMapping("doc", "join_field", "type=join,parent=child") - ); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } createIndexRequest("my-index", "parent", "1", null).get(); createIndexRequest("my-index", "child", "2", "1").get(); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 0eb890f52ef..660485ee01c 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -32,10 +32,10 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.IdsQueryBuilder; @@ -63,6 +63,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; @@ -95,17 +96,42 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase> 
nodePlugins() { - return Arrays.asList(ParentJoinPlugin.class, CustomScriptPlugin.class); + ArrayList> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(CustomScriptPlugin.class); + return plugins; } public static class CustomScriptPlugin extends MockScriptPlugin { @@ -90,9 +92,22 @@ public class InnerHitsIT extends ParentChildTestCase { ); } else { assertAcked(prepareCreate("articles") - .addMapping("doc", "join_field", "type=join,article=comment", "title", "type=text", - "message", "type=text,fielddata=true") - ); + .addMapping("doc", jsonBuilder().startObject().startObject("doc").startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("article", "comment") + .endObject() + .endObject() + .startObject("title") + .field("type", "text") + .endObject() + .startObject("message") + .field("type", "text") + .field("fielddata", true) + .endObject() + .endObject().endObject().endObject() + )); } List requests = new ArrayList<>(); @@ -173,8 +188,10 @@ public class InnerHitsIT extends ParentChildTestCase { assertAcked(prepareCreate("idx") .addMapping("doc", jsonBuilder().startObject().startObject("doc").startObject("properties") .startObject("join_field") - .field("type", "join") - .field("parent", new String[] {"child1", "child2"}) + .field("type", "join") + .startObject("relations") + .field("parent", new String[] {"child1", "child2"}) + .endObject() .endObject() .endObject().endObject().endObject() )); @@ -261,8 +278,8 @@ public class InnerHitsIT extends ParentChildTestCase { ); } else { assertAcked(prepareCreate("stack") - .addMapping("doc", "join_field", "type=join,question=answer", "body", "type=text") - ); + .addMapping("doc", addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "question", "answer"), + "body", "text"))); } List requests = new ArrayList<>(); requests.add(createIndexRequest("stack", "question", "1", null, "body", "I'm using HTTPS + Basic authentication " @@ -308,9 +325,9 @@ public class InnerHitsIT extends ParentChildTestCase { ); } else { assertAcked(prepareCreate("articles") - .addMapping("doc", "join_field", "type=join,article=comment,comment=remark", - "title", "type=text", "message", "type=text") - ); + .addMapping("doc", + addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, + "article", "comment", "comment", "remark"), "title", "text", "message", "text"))); } List requests = new ArrayList<>(); @@ -376,10 +393,9 @@ public class InnerHitsIT extends ParentChildTestCase { .addMapping("baron", "_parent", "type=earl") ); } else { - assertAcked( - prepareCreate("royals") - .addMapping("doc", "join_field", "type=join,king=prince,prince=duke,duke=earl,earl=baron") - ); + assertAcked(prepareCreate("royals") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, + "king", "prince", "prince", "duke", "duke", "earl", "earl", "baron"))); } List requests = new ArrayList<>(); @@ -452,7 +468,7 @@ public class InnerHitsIT extends ParentChildTestCase { .addMapping("child", "_parent", "type=parent")); } else { assertAcked(prepareCreate("index") - .addMapping("doc", "join_field", "type=join,parent=child")); + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } List requests = new ArrayList<>(); requests.add(createIndexRequest("index", "parent", "1", null)); @@ -495,7 +511,8 @@ public class InnerHitsIT extends ParentChildTestCase { if (legacy()) { 
assertAcked(prepareCreate("index1").addMapping("child", "_parent", "type=parent")); } else { - assertAcked(prepareCreate("index1").addMapping("doc", "join_field", "type=join,parent=child")); + assertAcked(prepareCreate("index1") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); } List requests = new ArrayList<>(); requests.add(createIndexRequest("index1", "parent", "1", null)); @@ -517,7 +534,8 @@ public class InnerHitsIT extends ParentChildTestCase { .addMapping("child_type", "_parent", "type=parent_type", "nested_type", "type=nested")); } else { assertAcked(prepareCreate("test") - .addMapping("doc", "join_field", "type=join,parent_type=child_type", "nested_type", "type=nested")); + .addMapping("doc", addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, + "parent_type", "child_type"), "nested_type", "nested"))); } createIndexRequest("test", "parent_type", "1", null, "key", "value").get(); createIndexRequest("test", "child_type", "2", "1", "nested_type", Collections.singletonMap("key", "value")).get(); @@ -545,7 +563,9 @@ public class InnerHitsIT extends ParentChildTestCase { ); } else { assertAcked(prepareCreate("index1") - .addMapping("doc", "join_field", "type=join,parent_type=child_type", "nested_type", "type=nested") + .addMapping("doc", addFieldMappings( + buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent_type", "child_type"), + "nested_type", "nested")) ); } assertAcked(prepareCreate("index2")); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java index 2bf6a0f2d3b..395b18ebb5e 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java @@ -114,7 +114,7 @@ public class LegacyHasChildQueryBuilderTests extends AbstractQueryTestCase> nodePlugins() { - return Collections.singleton(ParentJoinPlugin.class); + return Arrays.asList(InternalSettingsPlugin.class, ParentJoinPlugin.class); } @Override @@ -60,7 +63,7 @@ public abstract class ParentChildTestCase extends ESIntegTestCase { .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true); if (legacy()) { - builder.put("index.mapping.single_type", false); + builder.put("index.version.created", Version.V_5_6_0); } return builder.build(); @@ -84,6 +87,39 @@ public abstract class ParentChildTestCase extends ESIntegTestCase { return createIndexRequest(index, type, id, parentId, source); } + public static Map buildParentJoinFieldMappingFromSimplifiedDef(String joinFieldName, + boolean eagerGlobalOrdinals, + String... relations) { + Map fields = new HashMap<>(); + + Map joinField = new HashMap<>(); + joinField.put("type", "join"); + joinField.put("eager_global_ordinals", eagerGlobalOrdinals); + Map relationMap = new HashMap<>(); + for (int i = 0; i < relations.length; i+=2) { + String[] children = relations[i+1].split(","); + if (children.length > 1) { + relationMap.put(relations[i], children); + } else { + relationMap.put(relations[i], children[0]); + } + } + joinField.put("relations", relationMap); + fields.put(joinFieldName, joinField); + return Collections.singletonMap("properties", fields); + } + + @SuppressWarnings("unchecked") + public static Map addFieldMappings(Map map, String... 
fields) { + Map propsMap = (Map) map.get("properties"); + for (int i = 0; i < fields.length; i+=2) { + String field = fields[i]; + String type = fields[i + 1]; + propsMap.put(field, Collections.singletonMap("type", type)); + } + return map; + } + private IndexRequestBuilder createIndexRequest(String index, String type, String id, String parentId, Map source) { String name = type; if (legacy() == false) { diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java index 43d036458b4..afca17b3f80 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentIdQueryBuilderTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.query.QueryShardException; @@ -44,6 +45,7 @@ import java.io.IOException; import java.util.Collection; import java.util.Collections; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -71,15 +73,38 @@ public class ParentIdQueryBuilderTests extends AbstractQueryTestCase { public static final String NAME = "percolate"; + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ParseField.class)); + static final ParseField DOCUMENT_FIELD = new ParseField("document"); private static final ParseField QUERY_FIELD = new ParseField("field"); private static final ParseField DOCUMENT_TYPE_FIELD = new ParseField("document_type"); @@ -93,6 +98,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder types = mapperService.types(); + if (types.size() != 1) { + throw new IllegalStateException("Only a single type should exist, but [" + types.size() + " types exists"); + } + String type = types.iterator().next(); + if (documentType != null) { + DEPRECATION_LOGGER.deprecated("[document_type] parameter has been deprecated because types have been deprecated"); + if (documentType.equals(type) == false) { + throw new IllegalArgumentException("specified document_type [" + documentType + + "] is not equal to the actual type [" + type + "]"); + } + } + docMapper = mapperService.documentMapper(type); + doc = docMapper.parse(source(context.index().getName(), type, "_temp_id", document, documentXContentType)); + } else { + if (documentType == null) { + throw new IllegalArgumentException("[percolate] query is missing required [document_type] parameter"); + } + DocumentMapperForType docMapperForType = mapperService.documentMapperWithAutoCreate(documentType); + docMapper = docMapperForType.getDocumentMapper(); + doc = docMapper.parse(source(context.index().getName(), documentType, "_temp_id", document, documentXContentType)); + } FieldNameAnalyzer fieldNameAnalyzer = (FieldNameAnalyzer) docMapper.mappers().indexAnalyzer(); // Need to this custom impl because FieldNameAnalyzer is strict and the 
percolator sometimes isn't when @@ -425,18 +489,10 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder extractedTerms = new ArrayList<>(); LeafReader reader = indexReader.leaves().get(0).reader(); - Fields fields = reader.fields(); - for (String field : fields) { - Terms terms = fields.terms(field); + for (FieldInfo info : reader.getFieldInfos()) { + Terms terms = reader.terms(info.name); if (terms == null) { continue; } - BytesRef fieldBr = new BytesRef(field); + BytesRef fieldBr = new BytesRef(info.name); TermsEnum tenum = terms.iterator(); for (BytesRef term = tenum.next(); term != null; term = tenum.next()) { BytesRefBuilder builder = new BytesRefBuilder(); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java index 97533d23ee4..dc0d3db0559 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java @@ -33,11 +33,11 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.ParsedQuery; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.fetch.subphase.highlight.HighlightPhase; import org.elasticsearch.search.fetch.subphase.highlight.Highlighter; import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SubSearchContext; @@ -52,7 +52,7 @@ import java.util.Map; */ public final class PercolatorHighlightSubFetchPhase extends HighlightPhase { - public PercolatorHighlightSubFetchPhase(Settings settings, Map highlighters) { + PercolatorHighlightSubFetchPhase(Settings settings, Map highlighters) { super(settings, highlighters); } @@ -93,7 +93,7 @@ public final class PercolatorHighlightSubFetchPhase extends HighlightPhase { if (query != null) { subSearchContext.parsedQuery(new ParsedQuery(query)); hitContext.reset( - new SearchHit(0, "unknown", new Text(percolateQuery.getDocumentType()), Collections.emptyMap()), + new SearchHit(0, "unknown", new Text(hit.getType()), Collections.emptyMap()), percolatorLeafReaderContext, 0, percolatorIndexSearcher ); hitContext.cache().clear(); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 80af45d1075..aaef648cb05 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -40,7 +40,6 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.ConstantScoreScorer; -import org.apache.lucene.search.ConstantScoreWeight; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.FilterScorer; @@ -291,7 +290,7 @@ public class CandidateQueryTests extends ESSingleNodeTestCase { private void 
duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryIndex, IndexSearcher shardSearcher) throws IOException { boolean requireScore = randomBoolean(); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); - Query percolateQuery = fieldType.percolateQuery("type", queryStore, new BytesArray("{}"), percolateSearcher); + Query percolateQuery = fieldType.percolateQuery(queryStore, new BytesArray("{}"), percolateSearcher); Query query = requireScore ? percolateQuery : new ConstantScoreQuery(percolateQuery); TopDocs topDocs = shardSearcher.search(query, 10); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index dd518ba1438..dbea02c3040 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.internal.SearchContext; @@ -86,13 +87,16 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase new PercolateQueryBuilder("_field", null, new BytesArray("{}"), XContentType.JSON)); - assertThat(e.getMessage(), equalTo("[document_type] is a required argument")); - e = expectThrows(IllegalArgumentException.class, () -> new PercolateQueryBuilder("_field", "_document_type", null, null)); assertThat(e.getMessage(), equalTo("[document] is a required argument")); @@ -199,11 +199,6 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase { - new PercolateQueryBuilder("_field", null, "_index", "_type", "_id", null, null, null); - }); - assertThat(e.getMessage(), equalTo("[document_type] is a required argument")); - e = expectThrows(IllegalArgumentException.class, () -> { new PercolateQueryBuilder("_field", "_document_type", null, "_type", "_id", null, null, null); }); @@ -221,9 +216,15 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase parseQuery("{\"percolate\" : { \"document\": {}}")); - assertThat(e.getMessage(), equalTo("[percolate] query is missing required [document_type] parameter")); + QueryShardContext queryShardContext = createShardContext(); + QueryBuilder queryBuilder = parseQuery("{\"percolate\" : { \"document\": {}, \"field\":\"" + queryField + "\"}}"); + if (indexVersionCreated.before(Version.V_6_0_0_alpha1)) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> queryBuilder.toQuery(queryShardContext)); + assertThat(e.getMessage(), equalTo("[percolate] query is missing required [document_type] parameter")); + } else { + queryBuilder.toQuery(queryShardContext); + } } public void testCreateMultiDocumentSearcher() throws Exception { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java index c5588ed312f..c76ac14cffb 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java +++ 
b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java @@ -116,20 +116,20 @@ public class PercolateQueryTests extends ESTestCase { memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); // no scoring, wrapping it in a constant score query: - Query query = new ConstantScoreQuery(new PercolateQuery("type", queryStore, new BytesArray("a"), + Query query = new ConstantScoreQuery(new PercolateQuery(queryStore, new BytesArray("a"), new TermQuery(new Term("select", "a")), percolateSearcher, new MatchNoDocsQuery(""))); TopDocs topDocs = shardSearcher.search(query, 10); - assertThat(topDocs.totalHits, equalTo(1)); + assertThat(topDocs.totalHits, equalTo(1L)); assertThat(topDocs.scoreDocs.length, equalTo(1)); assertThat(topDocs.scoreDocs[0].doc, equalTo(0)); Explanation explanation = shardSearcher.explain(query, 0); assertThat(explanation.isMatch(), is(true)); assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score)); - query = new ConstantScoreQuery(new PercolateQuery("type", queryStore, new BytesArray("b"), + query = new ConstantScoreQuery(new PercolateQuery(queryStore, new BytesArray("b"), new TermQuery(new Term("select", "b")), percolateSearcher, new MatchNoDocsQuery(""))); topDocs = shardSearcher.search(query, 10); - assertThat(topDocs.totalHits, equalTo(3)); + assertThat(topDocs.totalHits, equalTo(3L)); assertThat(topDocs.scoreDocs.length, equalTo(3)); assertThat(topDocs.scoreDocs[0].doc, equalTo(1)); explanation = shardSearcher.explain(query, 1); @@ -146,15 +146,15 @@ public class PercolateQueryTests extends ESTestCase { assertThat(explanation.isMatch(), is(true)); assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[2].score)); - query = new ConstantScoreQuery(new PercolateQuery("type", queryStore, new BytesArray("c"), + query = new ConstantScoreQuery(new PercolateQuery(queryStore, new BytesArray("c"), new MatchAllDocsQuery(), percolateSearcher, new MatchAllDocsQuery())); topDocs = shardSearcher.search(query, 10); - assertThat(topDocs.totalHits, equalTo(4)); + assertThat(topDocs.totalHits, equalTo(4L)); - query = new PercolateQuery("type", queryStore, new BytesArray("{}"), new TermQuery(new Term("select", "b")), + query = new PercolateQuery(queryStore, new BytesArray("{}"), new TermQuery(new Term("select", "b")), percolateSearcher, new MatchNoDocsQuery("")); topDocs = shardSearcher.search(query, 10); - assertThat(topDocs.totalHits, equalTo(3)); + assertThat(topDocs.totalHits, equalTo(3L)); assertThat(topDocs.scoreDocs.length, equalTo(3)); assertThat(topDocs.scoreDocs[0].doc, equalTo(3)); explanation = shardSearcher.explain(query, 3); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java index ddd01a1a9d9..f210b851fd0 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java @@ -46,7 +46,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase { public void testHitsExecutionNeeded() { PercolateQuery percolateQuery = new PercolateQuery( - "", ctx -> null, new BytesArray("{}"), new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery() + ctx -> null, 
new BytesArray("{}"), new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery() ); PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(Settings.EMPTY, emptyMap()); @@ -61,7 +61,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase { public void testLocatePercolatorQuery() { PercolateQuery percolateQuery = new PercolateQuery( - "", ctx -> null, new BytesArray("{}"), new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery() + ctx -> null, new BytesArray("{}"), new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery() ); assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(new MatchAllDocsQuery()), nullValue()); BooleanQuery.Builder bq = new BooleanQuery.Builder(); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index 4f099adb7b7..cbd56400cab 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -23,36 +23,19 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.query.MatchPhraseQueryBuilder; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.MockScriptPlugin; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; -import org.elasticsearch.search.lookup.LeafDocLookup; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.function.Function; +import org.elasticsearch.test.ESIntegTestCase; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder; @@ -67,6 +50,7 @@ import static org.elasticsearch.index.query.QueryBuilders.spanNearQuery; import static org.elasticsearch.index.query.QueryBuilders.spanNotQuery; import static org.elasticsearch.index.query.QueryBuilders.spanTermQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.containsString; @@ -75,44 +59,10 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsNull.notNullValue; -public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { - - @Override - protected Collection> getPlugins() { - return Arrays.asList(PercolatorPlugin.class, CustomScriptPlugin.class); - } - - public static class CustomScriptPlugin extends MockScriptPlugin { - @Override - protected Map, Object>> pluginScripts() { - Map, Object>> scripts = new HashMap<>(); - scripts.put("1==1", vars -> Boolean.TRUE); - scripts.put("use_fielddata_please", vars -> { - LeafDocLookup leafDocLookup = (LeafDocLookup) vars.get("_doc"); - ScriptDocValues scriptDocValues = leafDocLookup.get("employees.name"); - return "virginia_potts".equals(scriptDocValues.get(0)); - }); - return scripts; - } - } - - public void testPercolateScriptQuery() throws IOException { - client().admin().indices().prepareCreate("index").addMapping("type", "query", "type=percolator").get(); - client().prepareIndex("index", "type", "1") - .setSource(jsonBuilder().startObject().field("query", QueryBuilders.scriptQuery( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "1==1", Collections.emptyMap()))).endObject()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .execute().actionGet(); - SearchResponse response = client().prepareSearch("index") - .setQuery(new PercolateQueryBuilder("query", "type", jsonBuilder().startObject().field("field1", "b").endObject().bytes(), - XContentType.JSON)) - .get(); - assertHitCount(response, 1); - assertSearchHits(response, "1"); - } +public class PercolatorQuerySearchIT extends ESIntegTestCase { public void testPercolatorQuery() throws Exception { - createIndex("test", client().admin().indices().prepareCreate("test") + assertAcked(client().admin().indices().prepareCreate("test") .addMapping("type", "field1", "type=keyword", "field2", "type=keyword", "query", "type=percolator") ); @@ -132,7 +82,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { BytesReference source = jsonBuilder().startObject().endObject().bytes(); logger.info("percolating empty doc"); SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) .get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); @@ -140,7 +90,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { source = jsonBuilder().startObject().field("field1", "value").endObject().bytes(); logger.info("percolating doc with 1 field"); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) .addSort("_uid", SortOrder.ASC) .get(); assertHitCount(response, 2); @@ -150,7 +100,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { source = jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject().bytes(); logger.info("percolating doc with 2 fields"); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", source, XContentType.JSON)) + 
.setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) .addSort("_uid", SortOrder.ASC) .get(); assertHitCount(response, 3); @@ -160,7 +110,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { } public void testPercolatorRangeQueries() throws Exception { - createIndex("test", client().admin().indices().prepareCreate("test") + assertAcked(client().admin().indices().prepareCreate("test") .addMapping("type", "field1", "type=long", "field2", "type=double", "field3", "type=ip", "field4", "type=date", "query", "type=percolator") ); @@ -214,7 +164,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { // Test long range: BytesReference source = jsonBuilder().startObject().field("field1", 12).endObject().bytes(); SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) .get(); assertHitCount(response, 2); assertThat(response.getHits().getAt(0).getId(), equalTo("3")); @@ -222,7 +172,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { source = jsonBuilder().startObject().field("field1", 11).endObject().bytes(); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) .get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); @@ -230,7 +180,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { // Test double range: source = jsonBuilder().startObject().field("field2", 12).endObject().bytes(); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) .get(); assertHitCount(response, 2); assertThat(response.getHits().getAt(0).getId(), equalTo("6")); @@ -238,7 +188,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { source = jsonBuilder().startObject().field("field2", 11).endObject().bytes(); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) .get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("4")); @@ -246,7 +196,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { // Test IP range: source = jsonBuilder().startObject().field("field3", "192.168.1.5").endObject().bytes(); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) .get(); assertHitCount(response, 2); assertThat(response.getHits().getAt(0).getId(), equalTo("9")); @@ -254,7 +204,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { source = jsonBuilder().startObject().field("field3", "192.168.1.4").endObject().bytes(); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) .get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("7")); @@ -262,14 +212,14 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { // Test date range: 
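As a side note on the API change exercised throughout these tests: with document_type removed, a percolate query against a single-type index needs only the percolator field and the document to percolate. A minimal, illustrative sketch of the new-style call follows; the index name "my-index" and the surrounding ESIntegTestCase/client() helpers are assumptions for illustration, not part of this patch:

    // Build the document to percolate with jsonBuilder(), as the tests above do.
    BytesReference doc = jsonBuilder().startObject().field("field1", "value").endObject().bytes();
    // No document_type argument: the single mapped type of the target index is used instead.
    SearchResponse resp = client().prepareSearch("my-index")
            .setQuery(new PercolateQueryBuilder("query", doc, XContentType.JSON))
            .get();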
source = jsonBuilder().startObject().field("field4", "2016-05-15").endObject().bytes(); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) .get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("10")); } public void testPercolatorQueryExistingDocument() throws Exception { - createIndex("test", client().admin().indices().prepareCreate("test") + assertAcked(client().admin().indices().prepareCreate("test") .addMapping("type", "field1", "type=keyword", "field2", "type=keyword", "query", "type=percolator") ); @@ -292,14 +242,14 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { logger.info("percolating empty doc"); SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", "test", "type", "1", null, null, null)) + .setQuery(new PercolateQueryBuilder("query", "test", "type", "1", null, null, null)) .get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); logger.info("percolating doc with 1 field"); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", "test", "type", "5", null, null, null)) + .setQuery(new PercolateQueryBuilder("query", "test", "type", "5", null, null, null)) .addSort("_uid", SortOrder.ASC) .get(); assertHitCount(response, 2); @@ -308,7 +258,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { logger.info("percolating doc with 2 fields"); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", "test", "type", "6", null, null, null)) + .setQuery(new PercolateQueryBuilder("query", "test", "type", "6", null, null, null)) .addSort("_uid", SortOrder.ASC) .get(); assertHitCount(response, 3); @@ -318,7 +268,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { } public void testPercolatorQueryExistingDocumentSourceDisabled() throws Exception { - createIndex("test", client().admin().indices().prepareCreate("test") + assertAcked(client().admin().indices().prepareCreate("test") .addMapping("type", "_source", "enabled=false", "field1", "type=keyword", "query", "type=percolator") ); @@ -332,7 +282,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { logger.info("percolating empty doc with source disabled"); Throwable e = expectThrows(SearchPhaseExecutionException.class, () -> { client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", "test", "type", "1", null, null, null)) + .setQuery(new PercolateQueryBuilder("query", "test", "type", "1", null, null, null)) .get(); }).getRootCause(); assertThat(e, instanceOf(IllegalArgumentException.class)); @@ -340,7 +290,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { } public void testPercolatorSpecificQueries() throws Exception { - createIndex("test", client().admin().indices().prepareCreate("test") + assertAcked(client().admin().indices().prepareCreate("test") .addMapping("type", "field1", "type=text", "field2", "type=text", "query", "type=percolator") ); @@ -396,7 +346,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { .field("field2", "the quick brown fox falls down into the well") .endObject().bytes(); SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", source, XContentType.JSON)) + .setQuery(new 
PercolateQueryBuilder("query", source, XContentType.JSON)) .addSort("_uid", SortOrder.ASC) .get(); assertHitCount(response, 4); @@ -418,7 +368,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { } else if (randomBoolean()) { fieldMapping.append(",index_options=offsets"); } - createIndex("test", client().admin().indices().prepareCreate("test") + assertAcked(client().admin().indices().prepareCreate("test") .addMapping("type", "field1", fieldMapping, "query", "type=percolator") ); client().prepareIndex("test", "type", "1") @@ -442,7 +392,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { .field("field1", "The quick brown fox jumps over the lazy dog") .endObject().bytes(); SearchResponse searchResponse = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "type", document, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", document, XContentType.JSON)) .highlighter(new HighlightBuilder().field("field1")) .addSort("_uid", SortOrder.ASC) .get(); @@ -461,7 +411,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { } public void testTakePositionOffsetGapIntoAccount() throws Exception { - createIndex("test", client().admin().indices().prepareCreate("test") + assertAcked(client().admin().indices().prepareCreate("test") .addMapping("type", "field", "type=text,position_increment_gap=5", "query", "type=percolator") ); client().prepareIndex("test", "type", "1") @@ -475,7 +425,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { client().admin().indices().prepareRefresh().get(); SearchResponse response = client().prepareSearch().setQuery( - new PercolateQueryBuilder("query", "type", new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"), XContentType.JSON) + new PercolateQueryBuilder("query", null, new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"), XContentType.JSON) ).get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("2")); @@ -484,13 +434,13 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { public void testManyPercolatorFields() throws Exception { String queryFieldName = randomAlphaOfLength(8); - createIndex("test1", client().admin().indices().prepareCreate("test1") + assertAcked(client().admin().indices().prepareCreate("test1") .addMapping("type", queryFieldName, "type=percolator", "field", "type=keyword") ); - createIndex("test2", client().admin().indices().prepareCreate("test2") + assertAcked(client().admin().indices().prepareCreate("test2") .addMapping("type", queryFieldName, "type=percolator", "second_query_field", "type=percolator", "field", "type=keyword") ); - createIndex("test3", client().admin().indices().prepareCreate("test3") + assertAcked(client().admin().indices().prepareCreate("test3") .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties") .startObject("field") .field("type", "keyword") @@ -510,9 +460,9 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { public void testWithMultiplePercolatorFields() throws Exception { String queryFieldName = randomAlphaOfLength(8); - createIndex("test1", client().admin().indices().prepareCreate("test1") + assertAcked(client().admin().indices().prepareCreate("test1") .addMapping("type", queryFieldName, "type=percolator", "field", "type=keyword")); - createIndex("test2", client().admin().indices().prepareCreate("test2") + assertAcked(client().admin().indices().prepareCreate("test2") .addMapping("type", 
jsonBuilder().startObject().startObject("type").startObject("properties") .startObject("field") .field("type", "keyword") @@ -542,7 +492,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { BytesReference source = jsonBuilder().startObject().field("field", "value").endObject().bytes(); SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder(queryFieldName, "type", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder(queryFieldName, source, XContentType.JSON)) .setIndices("test1") .get(); assertHitCount(response, 1); @@ -551,7 +501,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { assertThat(response.getHits().getAt(0).getIndex(), equalTo("test1")); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("object_field." + queryFieldName, "type", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("object_field." + queryFieldName, source, XContentType.JSON)) .setIndices("test2") .get(); assertHitCount(response, 1); @@ -578,7 +528,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { .startObject("companyname").field("type", "text").endObject().startObject("employee").field("type", "nested") .startObject("properties").startObject("name").field("type", "text").endObject().endObject().endObject().endObject() .endObject(); - createIndex("test", client().admin().indices().prepareCreate("test") + assertAcked(client().admin().indices().prepareCreate("test") .addMapping("employee", mapping) ); client().prepareIndex("test", "employee", "q1").setSource(jsonBuilder().startObject() @@ -593,7 +543,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { client().admin().indices().prepareRefresh().get(); SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "employee", + .setQuery(new PercolateQueryBuilder("query", XContentFactory.jsonBuilder() .startObject().field("companyname", "stark") .startArray("employee") @@ -607,7 +557,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { assertThat(response.getHits().getAt(0).getId(), equalTo("q1")); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "employee", + .setQuery(new PercolateQueryBuilder("query", XContentFactory.jsonBuilder() .startObject().field("companyname", "notstark") .startArray("employee") @@ -620,7 +570,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { assertHitCount(response, 0); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "employee", + .setQuery(new PercolateQueryBuilder("query", XContentFactory.jsonBuilder().startObject().field("companyname", "notstark").endObject().bytes(), XContentType.JSON)) .addSort("_doc", SortOrder.ASC) @@ -628,125 +578,8 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { assertHitCount(response, 0); } - public void testPercolateQueryWithNestedDocuments_doNotLeakBitsetCacheEntries() throws Exception { - XContentBuilder mapping = XContentFactory.jsonBuilder(); - mapping.startObject().startObject("properties").startObject("companyname").field("type", "text").endObject() - .startObject("query").field("type", "percolator").endObject() - .startObject("employee").field("type", "nested").startObject("properties") - .startObject("name").field("type", "text").endObject().endObject().endObject().endObject() - .endObject(); - createIndex("test", client().admin().indices().prepareCreate("test") - // to 
avoid normal document from being cached by BitsetFilterCache - .setSettings(Settings.builder().put(BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING.getKey(), false)) - .addMapping("employee", mapping) - ); - client().prepareIndex("test", "employee", "q1").setSource(jsonBuilder().startObject() - .field("query", QueryBuilders.nestedQuery("employee", - QueryBuilders.matchQuery("employee.name", "virginia potts").operator(Operator.AND), ScoreMode.Avg) - ).endObject()) - .get(); - client().admin().indices().prepareRefresh().get(); - - for (int i = 0; i < 32; i++) { - SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "employee", - XContentFactory.jsonBuilder() - .startObject().field("companyname", "stark") - .startArray("employee") - .startObject().field("name", "virginia potts").endObject() - .startObject().field("name", "tony stark").endObject() - .endArray() - .endObject().bytes(), XContentType.JSON)) - .addSort("_doc", SortOrder.ASC) - // size 0, because other wise load bitsets for normal document in FetchPhase#findRootDocumentIfNested(...) - .setSize(0) - .get(); - assertHitCount(response, 1); - } - - // We can't check via api... because BitsetCacheListener requires that it can extract shardId from index reader - // and for percolator it can't do that, but that means we don't keep track of - // memory for BitsetCache in case of percolator - long bitsetSize = client().admin().cluster().prepareClusterStats().get() - .getIndicesStats().getSegments().getBitsetMemoryInBytes(); - assertEquals("The percolator works with in-memory index and therefor shouldn't use bitset cache", 0L, bitsetSize); - } - - public void testPercolateQueryWithNestedDocuments_doLeakFieldDataCacheEntries() throws Exception { - XContentBuilder mapping = XContentFactory.jsonBuilder(); - mapping.startObject(); - { - mapping.startObject("properties"); - { - mapping.startObject("query"); - mapping.field("type", "percolator"); - mapping.endObject(); - } - { - mapping.startObject("companyname"); - mapping.field("type", "text"); - mapping.endObject(); - } - { - mapping.startObject("employees"); - mapping.field("type", "nested"); - { - mapping.startObject("properties"); - { - mapping.startObject("name"); - mapping.field("type", "text"); - mapping.field("fielddata", true); - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - createIndex("test", client().admin().indices().prepareCreate("test") - .addMapping("employee", mapping) - ); - Script script = new Script(ScriptType.INLINE, MockScriptPlugin.NAME, "use_fielddata_please", Collections.emptyMap()); - client().prepareIndex("test", "employee", "q1").setSource(jsonBuilder().startObject() - .field("query", QueryBuilders.nestedQuery("employees", - QueryBuilders.scriptQuery(script), ScoreMode.Avg) - ).endObject()).get(); - client().admin().indices().prepareRefresh().get(); - XContentBuilder doc = jsonBuilder(); - doc.startObject(); - { - doc.field("companyname", "stark"); - doc.startArray("employees"); - { - doc.startObject(); - doc.field("name", "virginia_potts"); - doc.endObject(); - } - { - doc.startObject(); - doc.field("name", "tony_stark"); - doc.endObject(); - } - doc.endArray(); - } - doc.endObject(); - for (int i = 0; i < 32; i++) { - SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", "employee", doc.bytes(), XContentType.JSON)) - .addSort("_doc", SortOrder.ASC) - .get(); - 
assertHitCount(response, 1); - } - - long fieldDataSize = client().admin().cluster().prepareClusterStats().get() - .getIndicesStats().getFieldData().getMemorySizeInBytes(); - assertEquals("The percolator works with in-memory index and therefor shouldn't use field-data cache", 0L, fieldDataSize); - } - public void testPercolatorQueryViaMultiSearch() throws Exception { - createIndex("test", client().admin().indices().prepareCreate("test") + assertAcked(client().admin().indices().prepareCreate("test") .addMapping("type", "field1", "type=text", "query", "type=percolator") ); @@ -772,21 +605,21 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase { MultiSearchResponse response = client().prepareMultiSearch() .add(client().prepareSearch("test") - .setQuery(new PercolateQueryBuilder("query", "type", + .setQuery(new PercolateQueryBuilder("query", jsonBuilder().startObject().field("field1", "b").endObject().bytes(), XContentType.JSON))) .add(client().prepareSearch("test") - .setQuery(new PercolateQueryBuilder("query", "type", + .setQuery(new PercolateQueryBuilder("query", yamlBuilder().startObject().field("field1", "c").endObject().bytes(), XContentType.JSON))) .add(client().prepareSearch("test") - .setQuery(new PercolateQueryBuilder("query", "type", + .setQuery(new PercolateQueryBuilder("query", smileBuilder().startObject().field("field1", "b c").endObject().bytes(), XContentType.JSON))) .add(client().prepareSearch("test") - .setQuery(new PercolateQueryBuilder("query", "type", + .setQuery(new PercolateQueryBuilder("query", jsonBuilder().startObject().field("field1", "d").endObject().bytes(), XContentType.JSON))) .add(client().prepareSearch("test") - .setQuery(new PercolateQueryBuilder("query", "type", "test", "type", "5", null, null, null))) + .setQuery(new PercolateQueryBuilder("query", "test", "type", "5", null, null, null))) .add(client().prepareSearch("test") // non existing doc, so error element - .setQuery(new PercolateQueryBuilder("query", "type", "test", "type", "6", null, null, null))) + .setQuery(new PercolateQueryBuilder("query", "test", "type", "6", null, null, null))) .get(); MultiSearchResponse.Item item = response.getResponses()[0]; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java new file mode 100644 index 00000000000..020280670c4 --- /dev/null +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java @@ -0,0 +1,205 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.percolator; + +import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.index.query.Operator; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.lookup.LeafDocLookup; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; + +public class PercolatorQuerySearchTests extends ESSingleNodeTestCase { + + @Override + protected Collection> getPlugins() { + return Arrays.asList(PercolatorPlugin.class, CustomScriptPlugin.class); + } + + public static class CustomScriptPlugin extends MockScriptPlugin { + @Override + protected Map, Object>> pluginScripts() { + Map, Object>> scripts = new HashMap<>(); + scripts.put("1==1", vars -> Boolean.TRUE); + scripts.put("use_fielddata_please", vars -> { + LeafDocLookup leafDocLookup = (LeafDocLookup) vars.get("_doc"); + ScriptDocValues scriptDocValues = leafDocLookup.get("employees.name"); + return "virginia_potts".equals(scriptDocValues.get(0)); + }); + return scripts; + } + } + + public void testPercolateScriptQuery() throws IOException { + client().admin().indices().prepareCreate("index").addMapping("type", "query", "type=percolator").get(); + client().prepareIndex("index", "type", "1") + .setSource(jsonBuilder().startObject().field("query", QueryBuilders.scriptQuery( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "1==1", Collections.emptyMap()))).endObject()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .execute().actionGet(); + SearchResponse response = client().prepareSearch("index") + .setQuery(new PercolateQueryBuilder("query", jsonBuilder().startObject().field("field1", "b").endObject().bytes(), + XContentType.JSON)) + .get(); + assertHitCount(response, 1); + assertSearchHits(response, "1"); + } + + public void testPercolateQueryWithNestedDocuments_doNotLeakBitsetCacheEntries() throws Exception { + XContentBuilder mapping = XContentFactory.jsonBuilder(); + mapping.startObject().startObject("properties").startObject("companyname").field("type", "text").endObject() + .startObject("query").field("type", "percolator").endObject() + .startObject("employee").field("type", "nested").startObject("properties") + .startObject("name").field("type", "text").endObject().endObject().endObject().endObject() + .endObject(); + createIndex("test", client().admin().indices().prepareCreate("test") + // to avoid normal document from being 
cached by BitsetFilterCache + .setSettings(Settings.builder().put(BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING.getKey(), false)) + .addMapping("employee", mapping) + ); + client().prepareIndex("test", "employee", "q1").setSource(jsonBuilder().startObject() + .field("query", QueryBuilders.nestedQuery("employee", + QueryBuilders.matchQuery("employee.name", "virginia potts").operator(Operator.AND), ScoreMode.Avg) + ).endObject()) + .get(); + client().admin().indices().prepareRefresh().get(); + + for (int i = 0; i < 32; i++) { + SearchResponse response = client().prepareSearch() + .setQuery(new PercolateQueryBuilder("query", + XContentFactory.jsonBuilder() + .startObject().field("companyname", "stark") + .startArray("employee") + .startObject().field("name", "virginia potts").endObject() + .startObject().field("name", "tony stark").endObject() + .endArray() + .endObject().bytes(), XContentType.JSON)) + .addSort("_doc", SortOrder.ASC) + // size 0, because other wise load bitsets for normal document in FetchPhase#findRootDocumentIfNested(...) + .setSize(0) + .get(); + assertHitCount(response, 1); + } + + // We can't check via api... because BitsetCacheListener requires that it can extract shardId from index reader + // and for percolator it can't do that, but that means we don't keep track of + // memory for BitsetCache in case of percolator + long bitsetSize = client().admin().cluster().prepareClusterStats().get() + .getIndicesStats().getSegments().getBitsetMemoryInBytes(); + assertEquals("The percolator works with in-memory index and therefor shouldn't use bitset cache", 0L, bitsetSize); + } + + public void testPercolateQueryWithNestedDocuments_doLeakFieldDataCacheEntries() throws Exception { + XContentBuilder mapping = XContentFactory.jsonBuilder(); + mapping.startObject(); + { + mapping.startObject("properties"); + { + mapping.startObject("query"); + mapping.field("type", "percolator"); + mapping.endObject(); + } + { + mapping.startObject("companyname"); + mapping.field("type", "text"); + mapping.endObject(); + } + { + mapping.startObject("employees"); + mapping.field("type", "nested"); + { + mapping.startObject("properties"); + { + mapping.startObject("name"); + mapping.field("type", "text"); + mapping.field("fielddata", true); + mapping.endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + createIndex("test", client().admin().indices().prepareCreate("test") + .addMapping("employee", mapping) + ); + Script script = new Script(ScriptType.INLINE, MockScriptPlugin.NAME, "use_fielddata_please", Collections.emptyMap()); + client().prepareIndex("test", "employee", "q1").setSource(jsonBuilder().startObject() + .field("query", QueryBuilders.nestedQuery("employees", + QueryBuilders.scriptQuery(script), ScoreMode.Avg) + ).endObject()).get(); + client().admin().indices().prepareRefresh().get(); + XContentBuilder doc = jsonBuilder(); + doc.startObject(); + { + doc.field("companyname", "stark"); + doc.startArray("employees"); + { + doc.startObject(); + doc.field("name", "virginia_potts"); + doc.endObject(); + } + { + doc.startObject(); + doc.field("name", "tony_stark"); + doc.endObject(); + } + doc.endArray(); + } + doc.endObject(); + for (int i = 0; i < 32; i++) { + SearchResponse response = client().prepareSearch() + .setQuery(new PercolateQueryBuilder("query", doc.bytes(), XContentType.JSON)) + .addSort("_doc", SortOrder.ASC) + .get(); + assertHitCount(response, 1); + } + + long fieldDataSize = 
client().admin().cluster().prepareClusterStats().get() + .getIndicesStats().getFieldData().getMemorySizeInBytes(); + assertEquals("The percolator works with in-memory index and therefor shouldn't use field-data cache", 0L, fieldDataSize); + } + +} diff --git a/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yml b/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yml index 2ef653f117d..cdb88f7da51 100644 --- a/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yml +++ b/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yml @@ -5,7 +5,7 @@ index: queries_index body: mappings: - type: + doc: properties: query: type: percolator @@ -15,7 +15,7 @@ - do: index: index: queries_index - type: type + type: doc id: test_percolator body: query: @@ -29,7 +29,6 @@ body: - query: percolate: - document_type: type field: query document: foo: bar @@ -41,7 +40,6 @@ - index: queries_index - query: percolate: - document_type: type field: query document: foo: bar diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index aef822ac002..91673fd0a41 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; @@ -232,7 +233,9 @@ public abstract class AbstractAsyncBulkByScrollAction() { @Override public void onResponse(RefreshResponse response) { @@ -461,6 +472,7 @@ public abstract class AbstractAsyncBulkByScrollAction new ParameterizedMessage("[{}]: finishing with a catastrophic failure", task.getId()), failure); finishHim(failure, emptyList(), emptyList(), false); } @@ -473,6 +485,7 @@ public abstract class AbstractAsyncBulkByScrollAction indexingFailures, List searchFailures, boolean timedOut) { + logger.debug("[{}]: finishing without any catastrophic failures", task.getId()); scrollSource.close(() -> { if (failure == null) { BulkByScrollResponse response = buildResponse( diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 5c437da3464..315621bf86f 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -561,18 +561,16 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { } public void testCancelBeforeScrollResponse() throws Exception { - // We bail so early we don't need to pass in a half way valid response. cancelTaskCase((DummyAsyncBulkByScrollAction action) -> simulateScrollResponse(action, timeValueNanos(System.nanoTime()), 1, - null)); + new ScrollableHitSource.Response(false, emptyList(), between(1, 100000), emptyList(), null))); } public void testCancelBeforeSendBulkRequest() throws Exception { - // We bail so early we don't need to pass in a half way valid request. 
- cancelTaskCase((DummyAsyncBulkByScrollAction action) -> action.sendBulkRequest(timeValueNanos(System.nanoTime()), null)); + cancelTaskCase((DummyAsyncBulkByScrollAction action) -> + action.sendBulkRequest(timeValueNanos(System.nanoTime()), new BulkRequest())); } public void testCancelBeforeOnBulkResponse() throws Exception { - // We bail so early we don't need to pass in a half way valid response. cancelTaskCase((DummyAsyncBulkByScrollAction action) -> action.onBulkResponse(timeValueNanos(System.nanoTime()), new BulkResponse(new BulkItemResponse[0], 0))); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java index a92ceedb0f3..3ad48d803a4 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java @@ -56,9 +56,9 @@ import static org.hamcrest.Matchers.hasSize; /** * Test that you can actually cancel a reindex/update-by-query/delete-by-query request and all the plumbing works. Doesn't test all of the * different cancellation places - that is the responsibility of AsyncBulkByScrollActionTests which have more precise control to - * simulate failures but do not exercise important portion of the stack like transport and task management. + * simulate failures but does not exercise important portion of the stack like transport and task management. */ -@TestLogging("org.elasticsearch.action.bulk.byscroll:DEBUG,org.elasticsearch.index.reindex:DEBUG") +@TestLogging("org.elasticsearch.index.reindex:DEBUG,org.elasticsearch.action.bulk:DEBUG") public class CancelTests extends ReindexTestCase { protected static final String INDEX = "reindex-cancel-index"; @@ -87,7 +87,7 @@ public class CancelTests extends ReindexTestCase { Matcher taskDescriptionMatcher) throws Exception { createIndex(INDEX); - // Total number of documents created for this test (~10 per primary shard per shard) + // Total number of documents created for this test (~10 per primary shard per slice) int numDocs = getNumShards(INDEX).numPrimaries * 10 * builder.request().getSlices(); ALLOWED_OPERATIONS.release(numDocs); @@ -231,12 +231,14 @@ public class CancelTests extends ReindexTestCase { } public void testReindexCancelWithWorkers() throws Exception { - testCancel(ReindexAction.NAME, reindex().source(INDEX).destination("dest", TYPE).setSlices(5), (response, total, modified) -> { - assertThat(response, matcher().created(modified).reasonCancelled(equalTo("by user request")).slices(hasSize(5))); - - refresh("dest"); - assertHitCount(client().prepareSearch("dest").setTypes(TYPE).setSize(0).get(), modified); - }, equalTo("reindex from [" + INDEX + "] to [dest][" + TYPE + "]")); + testCancel(ReindexAction.NAME, + reindex().source(INDEX).filter(QueryBuilders.matchAllQuery()).destination("dest", TYPE).setSlices(5), + (response, total, modified) -> { + assertThat(response, matcher().created(modified).reasonCancelled(equalTo("by user request")).slices(hasSize(5))); + refresh("dest"); + assertHitCount(client().prepareSearch("dest").setTypes(TYPE).setSize(0).get(), modified); + }, + equalTo("reindex from [" + INDEX + "] to [dest][" + TYPE + "]")); } public void testUpdateByQueryCancelWithWorkers() throws Exception { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java index 
e316759e041..aba7cd69359 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java @@ -19,14 +19,18 @@ package org.elasticsearch.index.reindex; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.InternalSettingsPlugin; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY; @@ -39,6 +43,12 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC import static org.hamcrest.Matchers.hasSize; public class DeleteByQueryBasicTests extends ReindexTestCase { + @Override + protected Collection> nodePlugins() { + List> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(InternalSettingsPlugin.class); + return plugins; + } public void testBasics() throws Exception { indexRandom(true, @@ -237,4 +247,26 @@ public class DeleteByQueryBasicTests extends ReindexTestCase { assertThat(request.get(), matcher().deleted(5).slices(hasSize(5))); assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 0); } + + /** + * Test delete by query support for filtering by type. This entire feature + * can and should be removed when we drop support for types index with + * multiple types from core. 
+ */ + public void testFilterByType() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings("index.version.created", Version.V_5_6_0.id)); // allows for multiple types + indexRandom(true, + client().prepareIndex("test", "test1", "1").setSource("foo", "a"), + client().prepareIndex("test", "test2", "2").setSource("foo", "a"), + client().prepareIndex("test", "test2", "3").setSource("foo", "b")); + + assertHitCount(client().prepareSearch("test").setSize(0).get(), 3); + + // Deletes doc of the type "type2" that also matches foo:a + DeleteByQueryRequestBuilder builder = deleteByQuery().source("test").filter(termQuery("foo", "a")).refresh(true); + builder.source().setTypes("test2"); + assertThat(builder.get(), matcher().deleted(1)); + assertHitCount(client().prepareSearch("test").setSize(0).get(), 2); + } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParentChildTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParentChildTests.java index 8c4135f1f26..14eb9245939 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParentChildTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParentChildTests.java @@ -19,18 +19,25 @@ package org.elasticsearch.index.reindex; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.join.ParentJoinPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.test.InternalSettingsPlugin; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.function.Function; import static org.elasticsearch.index.query.QueryBuilders.idsQuery; +import static org.elasticsearch.index.query.QueryBuilders.typeQuery; import static org.elasticsearch.join.query.JoinQueryBuilders.hasParentQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; @@ -40,7 +47,8 @@ import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; /** - * Index-by-search tests for parent/child. + * Reindex tests for legacy parent/child. Tests for the new {@code join} + * field are in a qa project. 
*/ public class ReindexParentChildTests extends ReindexTestCase { QueryBuilder findsCountry; @@ -56,6 +64,8 @@ public class ReindexParentChildTests extends ReindexTestCase { protected Collection> nodePlugins() { final List> plugins = new ArrayList<>(super.nodePlugins()); plugins.add(ParentJoinPlugin.class); + plugins.add(InternalSettingsPlugin.class); + plugins.add(CustomScriptPlugin.class); return Collections.unmodifiableList(plugins); } @@ -67,7 +77,7 @@ public class ReindexParentChildTests extends ReindexTestCase { public void testParentChild() throws Exception { createParentChildIndex("source"); createParentChildIndex("dest"); - createParentChildDocs("source"); + createParentChildDocs("source", true); // Copy parent to the new index ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(findsCountry).refresh(true); @@ -98,9 +108,32 @@ public class ReindexParentChildTests extends ReindexTestCase { "make-believe"); } + /** + * Tests for adding the {@code _parent} via script and adding *both* {@code _parent} and {@code _routing} values via scripts. + */ + public void testScriptAddsParent() throws Exception { + assertAcked(client().admin().indices().prepareCreate("source") + .setSettings("index.version.created", Version.V_5_6_0.id)); // allows for multiple types + + createParentChildIndex("dest"); + createParentChildDocs("source", false); + + ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(typeQuery("country")).refresh(true); + assertThat(copy.get(), matcher().created(1)); + copy = reindex().source("source").destination("dest").filter(typeQuery("city")) + .script(mockScript("ctx._parent='united states'")).refresh(true); + assertThat(copy.get(), matcher().created(1)); + assertSearchHits(client().prepareSearch("dest").setQuery(findsCity).get(), "pittsburgh"); + + copy = reindex().source("source").destination("dest").filter(typeQuery("neighborhood")) + .script(mockScript("ctx._parent='pittsburgh';ctx._routing='united states'")).refresh(true); + assertThat(copy.get(), matcher().created(1)); + assertSearchHits(client().prepareSearch("dest").setQuery(findsNeighborhood).get(), "make-believe"); + } + public void testErrorMessageWhenBadParentChild() throws Exception { createParentChildIndex("source"); - createParentChildDocs("source"); + createParentChildDocs("source", true); ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(findsCity); final BulkByScrollResponse response = copy.get(); @@ -116,25 +149,55 @@ public class ReindexParentChildTests extends ReindexTestCase { */ private void createParentChildIndex(String indexName) throws Exception { CreateIndexRequestBuilder create = client().admin().indices().prepareCreate(indexName); - create.setSettings("index.mapping.single_type", false); + create.setSettings("index.version.created", Version.V_5_6_0.id); // allows for multiple types create.addMapping("city", "{\"_parent\": {\"type\": \"country\"}}", XContentType.JSON); create.addMapping("neighborhood", "{\"_parent\": {\"type\": \"city\"}}", XContentType.JSON); assertAcked(create); ensureGreen(); } - private void createParentChildDocs(String indexName) throws Exception { - indexRandom(true, client().prepareIndex(indexName, "country", "united states").setSource("foo", "bar"), - client().prepareIndex(indexName, "city", "pittsburgh").setParent("united states").setSource("foo", "bar"), - client().prepareIndex(indexName, "neighborhood", "make-believe").setParent("pittsburgh") - .setSource("foo", 
"bar").setRouting("united states")); + private void createParentChildDocs(String indexName, boolean addParents) throws Exception { + indexRandom(true, + client().prepareIndex(indexName, "country", "united states") + .setSource("foo", "bar"), + client().prepareIndex(indexName, "city", "pittsburgh") + .setParent(addParents ? "united states" : null) + .setSource("foo", "bar"), + client().prepareIndex(indexName, "neighborhood", "make-believe") + .setParent(addParents ? "pittsburgh" : null) + .setRouting(addParents ? "united states" : null) + .setSource("foo", "bar")); findsCountry = idsQuery("country").addIds("united states"); findsCity = hasParentQuery("country", findsCountry, false); findsNeighborhood = hasParentQuery("city", findsCity, false); - // Make sure we built the parent/child relationship - assertSearchHits(client().prepareSearch(indexName).setQuery(findsCity).get(), "pittsburgh"); - assertSearchHits(client().prepareSearch(indexName).setQuery(findsNeighborhood).get(), "make-believe"); + if (addParents) { + // Make sure we built the parent/child relationship + assertSearchHits(client().prepareSearch(indexName).setQuery(findsCity).get(), "pittsburgh"); + assertSearchHits(client().prepareSearch(indexName).setQuery(findsNeighborhood).get(), "make-believe"); + } + } + + public static class CustomScriptPlugin extends MockScriptPlugin { + @Override + @SuppressWarnings("unchecked") + protected Map, Object>> pluginScripts() { + Map, Object>> scripts = new HashMap<>(); + + scripts.put("ctx._parent='united states'", vars -> { + Map ctx = (Map) vars.get("ctx"); + ctx.put("_parent", "united states"); + return null; + }); + scripts.put("ctx._parent='pittsburgh';ctx._routing='united states'", vars -> { + Map ctx = (Map) vars.get("ctx"); + ctx.put("_parent", "pittsburgh"); + ctx.put("_routing", "united states"); + return null; + }); + + return scripts; + } } } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/30_by_type.yml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/30_by_type.yml deleted file mode 100644 index 4ed279a0165..00000000000 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/30_by_type.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- -"Delete by type": - - do: - indices.create: - index: test - body: - settings: - mapping.single_type: false - - - do: - index: - index: test - type: t1 - id: 1 - body: { foo: bar } - - do: - index: - index: test - type: t1 - id: 2 - body: { foo: bar } - - do: - index: - index: test - type: t2 - id: 1 - body: { foo: bar } - - do: - index: - index: test - type: t2 - id: 2 - body: { foo: bar } - - do: - index: - index: test - type: t2 - id: 3 - body: { foo: baz } - - do: - indices.refresh: {} - - do: - count: - index: test - type: t2 - - - match: {count: 3} - - - do: - delete_by_query: - index: test - type: t2 - body: - query: - match: - foo: bar - - - is_false: timed_out - - match: {deleted: 2} - - is_false: created - - is_false: updated - - match: {version_conflicts: 0} - - match: {batches: 1} - - match: {failures: []} - - match: {noops: 0} - - match: {throttled_millis: 0} - - gte: { took: 0 } - - is_false: task - - - do: - indices.refresh: {} - - - do: - count: - index: test - type: t2 - - - match: {count: 1} - diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index 07e91ec50e4..12db47908d1 100644 --- 
a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -29,7 +29,6 @@ import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaderValues; -import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseStatus; @@ -172,7 +171,7 @@ final class Netty4HttpChannel extends AbstractRestChannel { private void addCookies(HttpResponse resp) { if (transport.resetCookies) { - String cookieString = nettyRequest.headers().get(HttpHeaders.Names.COOKIE); + String cookieString = nettyRequest.headers().get(HttpHeaderNames.COOKIE); if (cookieString != null) { Set cookies = ServerCookieDecoder.STRICT.decode(cookieString); if (!cookies.isEmpty()) { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java index e83cfc62fda..9763a5116b1 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java @@ -22,11 +22,8 @@ package org.elasticsearch.transport.netty4; import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelDuplexHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.transport.TcpHeader; -import org.elasticsearch.transport.TcpTransport; -import org.elasticsearch.transport.TransportServiceAdapter; import org.elasticsearch.transport.Transports; import java.net.InetSocketAddress; @@ -37,25 +34,14 @@ import java.net.InetSocketAddress; */ final class Netty4MessageChannelHandler extends ChannelDuplexHandler { - private final TransportServiceAdapter transportServiceAdapter; private final Netty4Transport transport; private final String profileName; Netty4MessageChannelHandler(Netty4Transport transport, String profileName) { - this.transportServiceAdapter = transport.transportServiceAdapter(); this.transport = transport; this.profileName = profileName; } - @Override - public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { - if (msg instanceof ByteBuf && transportServiceAdapter != null) { - // record the number of bytes send on the channel - promise.addListener(f -> transportServiceAdapter.addBytesSent(((ByteBuf) msg).readableBytes())); - } - ctx.write(msg, promise); - } - @Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { Transports.assertTransportThread(); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index abe0739c243..140041b53b7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -306,7 +306,7 @@ public class Netty4Transport extends TcpTransport { } 
@Override - public long serverOpen() { + public long getNumOpenServerConnections() { Netty4OpenChannelsHandler channels = serverOpenChannels; return channels == null ? 0 : channels.numberOfOpenChannels(); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java index 8bfdbb739d7..0cd567dd145 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java @@ -82,12 +82,9 @@ public class Netty4ScheduledPingTests extends ESTestCase { serviceA.connectToNode(nodeB); serviceB.connectToNode(nodeA); - assertBusy(new Runnable() { - @Override - public void run() { - assertThat(nettyA.getPing().getSuccessfulPings(), greaterThan(100L)); - assertThat(nettyB.getPing().getSuccessfulPings(), greaterThan(100L)); - } + assertBusy(() -> { + assertThat(nettyA.getPing().getSuccessfulPings(), greaterThan(100L)); + assertThat(nettyB.getPing().getSuccessfulPings(), greaterThan(100L)); }); assertThat(nettyA.getPing().getFailedPings(), equalTo(0L)); assertThat(nettyB.getPing().getFailedPings(), equalTo(0L)); diff --git a/plugins/analysis-icu/bin/licenses/icu4j-59.1.jar.sha1 b/plugins/analysis-icu/bin/licenses/icu4j-59.1.jar.sha1 new file mode 100644 index 00000000000..5401f914f58 --- /dev/null +++ b/plugins/analysis-icu/bin/licenses/icu4j-59.1.jar.sha1 @@ -0,0 +1 @@ +6f06e820cf4c8968bbbaae66ae0b33f6a256b57f \ No newline at end of file diff --git a/plugins/analysis-icu/bin/licenses/lucene-analyzers-icu-7.0.0-snapshot-ad2cb77.jar.sha1 b/plugins/analysis-icu/bin/licenses/lucene-analyzers-icu-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..0c08e240dbf --- /dev/null +++ b/plugins/analysis-icu/bin/licenses/lucene-analyzers-icu-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +f90e2fe9e8ff1be65a800e719d2a25cd0a09cced \ No newline at end of file diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index a25c2c771ff..2a8905e080f 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -24,7 +24,7 @@ esplugin { dependencies { compile "org.apache.lucene:lucene-analyzers-icu:${versions.lucene}" - compile 'com.ibm.icu:icu4j:56.1' + compile 'com.ibm.icu:icu4j:59.1' } dependencyLicenses { diff --git a/plugins/analysis-icu/licenses/icu4j-56.1.jar.sha1 b/plugins/analysis-icu/licenses/icu4j-56.1.jar.sha1 deleted file mode 100644 index 51dc722bf92..00000000000 --- a/plugins/analysis-icu/licenses/icu4j-56.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8dd6671f52165a0419e6de5e1016400875a90fa9 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/icu4j-59.1.jar.sha1 b/plugins/analysis-icu/licenses/icu4j-59.1.jar.sha1 new file mode 100644 index 00000000000..5401f914f58 --- /dev/null +++ b/plugins/analysis-icu/licenses/icu4j-59.1.jar.sha1 @@ -0,0 +1 @@ +6f06e820cf4c8968bbbaae66ae0b33f6a256b57f \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.0.0-snapshot-a0aef2f.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 6cebd9da7b2..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18e2a8a8096b13e191882aa77134e27c68e60372 \ No newline 
at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.0.0-snapshot-ad2cb77.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..0c08e240dbf --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +f90e2fe9e8ff1be65a800e719d2a25cd0a09cced \ No newline at end of file diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java index 5fd3199e99a..60ab831e6f1 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java @@ -19,9 +19,8 @@ package org.elasticsearch.index.analysis; -import com.ibm.icu.text.FilteredNormalizer2; import com.ibm.icu.text.Normalizer2; -import com.ibm.icu.text.UnicodeSet; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.icu.ICUFoldingFilter; import org.elasticsearch.common.settings.Settings; @@ -41,31 +40,20 @@ import org.elasticsearch.index.IndexSettings; * @author kimchy (shay.banon) */ public class IcuFoldingTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { - private final String unicodeSetFilter; + /** Store here the same Normalizer used by the lucene ICUFoldingFilter */ + private static final Normalizer2 ICU_FOLDING_NORMALIZER = Normalizer2.getInstance( + ICUFoldingFilter.class.getResourceAsStream("utr30.nrm"), "utr30", Normalizer2.Mode.COMPOSE); + + private final Normalizer2 normalizer; public IcuFoldingTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); - this.unicodeSetFilter = settings.get("unicodeSetFilter"); + this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(ICU_FOLDING_NORMALIZER, settings); } @Override public TokenStream create(TokenStream tokenStream) { - - // The ICUFoldingFilter is in fact implemented as a ICUNormalizer2Filter. 
- // ICUFoldingFilter lacks a constructor for adding filtering so we implemement it here - if (unicodeSetFilter != null) { - Normalizer2 base = Normalizer2.getInstance( - ICUFoldingFilter.class.getResourceAsStream("utr30.nrm"), - "utr30", Normalizer2.Mode.COMPOSE); - UnicodeSet unicodeSet = new UnicodeSet(unicodeSetFilter); - - unicodeSet.freeze(); - Normalizer2 filtered = new FilteredNormalizer2(base, unicodeSet); - return new org.apache.lucene.analysis.icu.ICUNormalizer2Filter(tokenStream, filtered); - } - else { - return new ICUFoldingFilter(tokenStream); - } + return new org.apache.lucene.analysis.icu.ICUNormalizer2Filter(tokenStream, normalizer); } @Override diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java index 72bc45a0232..3046d6839b9 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.analysis; import com.ibm.icu.text.Normalizer2; + import org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -33,22 +34,22 @@ import java.io.Reader; * Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter} to normalize character. *

The name can be used to provide the type of normalization to perform.
 * The mode can be used to provide 'compose' or 'decompose'. Default is compose.
+ * The unicodeSetFilter attribute can be used to provide the UniCodeSet for filtering.

*/ public class IcuNormalizerCharFilterFactory extends AbstractCharFilterFactory implements MultiTermAwareComponent { - private final String name; - private final Normalizer2 normalizer; public IcuNormalizerCharFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name); - this.name = settings.get("name", "nfkc_cf"); + String method = settings.get("name", "nfkc_cf"); String mode = settings.get("mode"); if (!"compose".equals(mode) && !"decompose".equals(mode)) { mode = "compose"; } - this.normalizer = Normalizer2.getInstance( - null, this.name, "compose".equals(mode) ? Normalizer2.Mode.COMPOSE : Normalizer2.Mode.DECOMPOSE); + Normalizer2 normalizer = Normalizer2.getInstance( + null, method, "compose".equals(mode) ? Normalizer2.Mode.COMPOSE : Normalizer2.Mode.DECOMPOSE); + this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(normalizer, settings); } @Override diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java index 2632958d203..4e8d5d70220 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java @@ -19,7 +19,10 @@ package org.elasticsearch.index.analysis; +import com.ibm.icu.text.FilteredNormalizer2; import com.ibm.icu.text.Normalizer2; +import com.ibm.icu.text.UnicodeSet; + import org.apache.lucene.analysis.TokenStream; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -28,26 +31,40 @@ import org.elasticsearch.index.IndexSettings; /** * Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2Filter} to normalize tokens. - *

The name can be used to provide the type of normalization to perform.
+ * The name can be used to provide the type of normalization to perform.
+ * The unicodeSetFilter attribute can be used to provide the UniCodeSet for filtering.

* * */ public class IcuNormalizerTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { - private final String name; + private final Normalizer2 normalizer; public IcuNormalizerTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); - this.name = settings.get("name", "nfkc_cf"); + String method = settings.get("name", "nfkc_cf"); + Normalizer2 normalizer = Normalizer2.getInstance(null, method, Normalizer2.Mode.COMPOSE); + this.normalizer = wrapWithUnicodeSetFilter(normalizer, settings); } @Override public TokenStream create(TokenStream tokenStream) { - return new org.apache.lucene.analysis.icu.ICUNormalizer2Filter(tokenStream, Normalizer2.getInstance(null, name, Normalizer2.Mode.COMPOSE)); + return new org.apache.lucene.analysis.icu.ICUNormalizer2Filter(tokenStream, normalizer); } @Override public Object getMultiTermComponent() { return this; } + + static Normalizer2 wrapWithUnicodeSetFilter(final Normalizer2 normalizer, Settings settings) { + String unicodeSetFilter = settings.get("unicodeSetFilter"); + if (unicodeSetFilter != null) { + UnicodeSet unicodeSet = new UnicodeSet(unicodeSetFilter); + + unicodeSet.freeze(); + return new FilteredNormalizer2(normalizer, unicodeSet); + } + return normalizer; + } } diff --git a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yml b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yml index 180f6c6f5b6..521d8f07140 100644 --- a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yml +++ b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/10_basic.yml @@ -39,3 +39,50 @@ tokenizer: keyword - length: { tokens: 1 } - match: { tokens.0.token: foo bar resume } +--- +"Normalization with a UnicodeSet Filter": + - do: + indices.create: + index: test + body: + settings: + index: + analysis: + char_filter: + charfilter_icu_normalizer: + type: icu_normalizer + unicodeSetFilter: "[^ß]" + filter: + tokenfilter_icu_normalizer: + type: icu_normalizer + unicodeSetFilter: "[^ßB]" + tokenfilter_icu_folding: + type: icu_folding + unicodeSetFilter: "[^â]" + - do: + indices.analyze: + index: test + body: + char_filter: ["charfilter_icu_normalizer"] + tokenizer: keyword + text: charfilter Föo Bâr Ruß + - length: { tokens: 1 } + - match: { tokens.0.token: charfilter föo bâr ruß } + - do: + indices.analyze: + index: test + body: + tokenizer: keyword + filter: ["tokenfilter_icu_normalizer"] + text: tokenfilter Föo Bâr Ruß + - length: { tokens: 1 } + - match: { tokens.0.token: tokenfilter föo Bâr ruß } + - do: + indices.analyze: + index: test + body: + tokenizer: keyword + filter: ["tokenfilter_icu_folding"] + text: icufolding Föo Bâr Ruß + - length: { tokens: 1 } + - match: { tokens.0.token: icufolding foo bâr russ } diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.0.0-snapshot-a0aef2f.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 56ee53168d8..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -236924d9d6da7e4f36535e957e9a506b4e737302 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.0.0-snapshot-ad2cb77.jar.sha1 
b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..ebb4d22be2e --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +345ac08f374992ba70a4785c2cba5ec64b1f1cf5 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.0.0-snapshot-a0aef2f.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 1296ea36828..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f8b0087d03c65253122cbc3b3419f346204e80fe \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.0.0-snapshot-ad2cb77.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..8afd76e9716 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +c50fc14d093c4ad9fbc8d6e457d855034e59456e \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.0.0-snapshot-a0aef2f.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index ec0c34c2d1b..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3e5102270f6c10a3b33e402ed5f8722ec2a1a338 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.0.0-snapshot-ad2cb77.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..be99459c88b --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +cc4e86b04a8654885d69e849513219aaa7358435 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.0.0-snapshot-a0aef2f.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index cfbd6ca2982..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6d9730ec654bdcf943a4018a5695e7954159ceda \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.0.0-snapshot-ad2cb77.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..cadf7aa13cd --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +b5ac4f79ef4b531e64ca19b22fc704cbd1618e6c \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.0.0-snapshot-a0aef2f.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.0.0-snapshot-a0aef2f.jar.sha1 deleted file mode 100644 index 5c15573f5bd..00000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.0.0-snapshot-a0aef2f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -26d01ae0d15243b30874b2cb609be5d041890459 \ No newline at end of file diff --git 
a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.0.0-snapshot-ad2cb77.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.0.0-snapshot-ad2cb77.jar.sha1 new file mode 100644 index 00000000000..847893a98da --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.0.0-snapshot-ad2cb77.jar.sha1 @@ -0,0 +1 @@ +8a6fc7317cbebed963c5ee6ce48f7f62fbba3883 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java index 0240781c1aa..a4fd985e4b5 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java @@ -22,24 +22,15 @@ package org.elasticsearch.plugin.discovery.azure.classic; import org.apache.logging.log4j.Logger; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceImpl; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.service.ClusterApplier; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.azure.classic.AzureUnicastHostsProvider; import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.Arrays; @@ -73,17 +64,7 @@ public class AzureDiscoveryPlugin extends Plugin implements DiscoveryPlugin { () -> new AzureUnicastHostsProvider(settings, createComputeService(), transportService, networkService)); } - @Override - public Map> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, - NamedWriteableRegistry namedWriteableRegistry, - MasterService masterService, ClusterApplier clusterApplier, - ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, - AllocationService allocationService) { - // this is for backcompat with pre 5.1, where users would set discovery.type to use ec2 hosts provider - return Collections.singletonMap(AZURE, () -> - new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, - clusterSettings, hostsProvider, allocationService)); - } + @Override public List> getSettings() { @@ -99,19 +80,5 @@ public class AzureDiscoveryPlugin extends Plugin implements DiscoveryPlugin { AzureComputeService.Discovery.ENDPOINT_NAME_SETTING); } - @Override - public Settings additionalSettings() { - // For 5.0, the hosts provider was "zen", but this was before the 
discovery.zen.hosts_provider - // setting existed. This check looks for the legacy setting, and sets hosts provider if set - String discoveryType = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings); - if (discoveryType.equals(AZURE)) { - deprecationLogger.deprecated("using [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + - "] to set hosts provider is deprecated; " + - "set \"" + DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey() + ": " + AZURE + "\" instead"); - if (DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.exists(settings) == false) { - return Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), AZURE).build(); - } - } - return Settings.EMPTY; - } + } diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java index c9496b1ead4..55cc8a366c2 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java @@ -23,8 +23,6 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService.Discovery; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService.Management; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.Node; -import org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -44,7 +42,7 @@ public abstract class AbstractAzureComputeServiceTestCase extends ESIntegTestCas protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put("discovery.type", "azure"); + .put("discovery.zen.hosts_provider", "azure"); // We add a fake subscription_id to start mock compute service builder.put(Management.SUBSCRIPTION_ID_SETTING.getKey(), "fake") diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java index 09fa16b8ed0..d47d7286cd1 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java @@ -106,13 +106,12 @@ public class AzureDiscoveryClusterFormationTests extends ESIntegTestCase { throw new RuntimeException(e); } return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), AzureDiscoveryPlugin.AZURE) + .put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), AzureDiscoveryPlugin.AZURE) .put(Environment.PATH_LOGS_SETTING.getKey(), resolve) .put(TransportSettings.PORT.getKey(), 0) .put(Node.WRITE_PORTS_FILE_SETTING.getKey(), "true") .put(AzureComputeService.Management.ENDPOINT_SETTING.getKey(), "https://" + InetAddress.getLoopbackAddress().getHostAddress() + ":" + httpsServer.getAddress().getPort()) - 
.put(Environment.PATH_CONF_SETTING.getKey(), keyStoreFile.getParent().toAbsolutePath()) .put(AzureComputeService.Management.KEYSTORE_PATH_SETTING.getKey(), keyStoreFile.toAbsolutePath()) .put(AzureComputeService.Discovery.HOST_TYPE_SETTING.getKey(), AzureUnicastHostsProvider.HostType.PUBLIC_IP.name()) .put(AzureComputeService.Management.KEYSTORE_PASSWORD_SETTING.getKey(), "keypass") @@ -125,6 +124,11 @@ public class AzureDiscoveryClusterFormationTests extends ESIntegTestCase { .build(); } + @Override + protected Path nodeConfigPath(int nodeOrdinal) { + return keyStoreFile.getParent(); + } + /** * Creates mock EC2 endpoint providing the list of started nodes to the DescribeInstances API call */ diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java index 3280368631b..50c5dac4b75 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java @@ -24,33 +24,24 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.SetOnce; import org.elasticsearch.SpecialPermission; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.UncheckedIOException; import java.io.BufferedReader; -import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; -import java.io.UncheckedIOException; +import java.io.Closeable; import java.net.URL; import java.net.URLConnection; import java.nio.charset.StandardCharsets; @@ -95,17 +86,7 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close this.settings = settings; } - @Override - public Map> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, - NamedWriteableRegistry namedWriteableRegistry, - MasterService masterService, ClusterApplier clusterApplier, - ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, - AllocationService allocationService) { - // this is for backcompat with pre 5.1, where users would set discovery.type to use ec2 hosts provider - return Collections.singletonMap(EC2, () -> - new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, - clusterSettings, hostsProvider, allocationService)); - } + @Override public 
NetworkService.CustomNameResolver getCustomNameResolver(Settings settings) { diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java index d20b5eaef05..f636dcaba0c 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java @@ -29,24 +29,15 @@ import org.elasticsearch.cloud.gce.GceInstancesServiceImpl; import org.elasticsearch.cloud.gce.GceMetadataService; import org.elasticsearch.cloud.gce.network.GceNameResolver; import org.elasticsearch.cloud.gce.util.Access; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.service.ClusterApplier; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.discovery.gce.GceUnicastHostsProvider; import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.Closeable; @@ -83,17 +74,7 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close logger.trace("starting gce discovery plugin..."); } - @Override - public Map> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, - NamedWriteableRegistry namedWriteableRegistry, - MasterService masterService, ClusterApplier clusterApplier, - ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, - AllocationService allocationService) { - // this is for backcompat with pre 5.1, where users would set discovery.type to use ec2 hosts provider - return Collections.singletonMap(GCE, () -> - new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, - clusterSettings, hostsProvider, allocationService)); - } + @Override public Map> getZenHostsProviders(TransportService transportService, @@ -122,21 +103,7 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close GceInstancesService.MAX_WAIT_SETTING); } - @Override - public Settings additionalSettings() { - // For 5.0, the hosts provider was "zen", but this was before the discovery.zen.hosts_provider - // setting existed. This check looks for the legacy setting, and sets hosts provider if set - String discoveryType = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings); - if (discoveryType.equals(GCE)) { - deprecationLogger.deprecated("Using " + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + - " setting to set hosts provider is deprecated. 
" + - "Set \"" + DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey() + ": " + GCE + "\" instead"); - if (DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.exists(settings) == false) { - return Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), GCE).build(); - } - } - return Settings.EMPTY; - } + @Override public void close() throws IOException { diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java index ad33f8ec218..2b35a838e89 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java @@ -90,7 +90,7 @@ public class GceDiscoverTests extends ESIntegTestCase { throw new RuntimeException(e); } return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put("discovery.type", "gce") + .put("discovery.zen.hosts_provider", "gce") .put("path.logs", resolve) .put("transport.tcp.port", 0) .put("node.portsfile", "true") diff --git a/plugins/examples/build.gradle b/plugins/examples/build.gradle index e69de29bb2d..47db55b3b33 100644 --- a/plugins/examples/build.gradle +++ b/plugins/examples/build.gradle @@ -0,0 +1,10 @@ +// Subprojects aren't published so do not assemble +gradle.projectsEvaluated { + subprojects { + Task assemble = project.tasks.findByName('assemble') + if (assemble) { + project.tasks.remove(assemble) + project.build.dependsOn.remove('assemble') + } + } +} diff --git a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java index 4b6713c6a64..5a146f75919 100644 --- a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java +++ b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java @@ -115,7 +115,7 @@ public class ExpertScriptPlugin extends Plugin implements ScriptPlugin { } @Override - public boolean needsScores() { + public boolean needs_score() { return false; } }; diff --git a/plugins/jvm-example/build.gradle b/plugins/jvm-example/build.gradle index fb362e6fa36..78e54d8bc81 100644 --- a/plugins/jvm-example/build.gradle +++ b/plugins/jvm-example/build.gradle @@ -21,6 +21,9 @@ esplugin { description 'Demonstrates all the pluggable Java entry points in Elasticsearch' classname 'org.elasticsearch.plugin.example.JvmExamplePlugin' } +// Not published so no need to assemble +tasks.remove(assemble) +build.dependsOn.remove('assemble') // no unit tests test.enabled = false @@ -47,4 +50,3 @@ integTestCluster { integTestRunner { systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }" } - diff --git a/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/JvmExamplePlugin.java b/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/JvmExamplePlugin.java index 431d4818791..03321e1c4a2 100644 --- a/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/JvmExamplePlugin.java +++ b/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/JvmExamplePlugin.java @@ -31,6 +31,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; +import 
java.nio.file.Path; import java.util.List; import java.util.function.Supplier; @@ -42,9 +43,8 @@ import static java.util.Collections.singletonList; public class JvmExamplePlugin extends Plugin implements ActionPlugin { private final ExamplePluginConfiguration config; - public JvmExamplePlugin(Settings settings) { - Environment environment = new Environment(settings); - config = new ExamplePluginConfiguration(environment); + public JvmExamplePlugin(Settings settings, Path configPath) { + config = new ExamplePluginConfiguration(new Environment(settings, configPath)); } @Override diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java index 92cbb2e6e39..d3980c19110 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java @@ -111,6 +111,6 @@ public class SizeMappingIT extends ESIntegTestCase { client().prepareIndex("test", "type", "1").setSource(source, XContentType.JSON)); GetResponse getResponse = client().prepareGet("test", "type", "1").setStoredFields("_size").get(); assertNotNull(getResponse.getField("_size")); - assertEquals(source.length(), getResponse.getField("_size").getValue()); + assertEquals(source.length(), (int) getResponse.getField("_size").getValue()); } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/DefaultS3OutputStream.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/DefaultS3OutputStream.java index e80f07ac55e..d6bba2eea81 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/DefaultS3OutputStream.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/DefaultS3OutputStream.java @@ -122,30 +122,11 @@ class DefaultS3OutputStream extends S3OutputStream { } md.setContentLength(length); - // We try to compute a MD5 while reading it - MessageDigest messageDigest; - InputStream inputStream; - try { - messageDigest = MessageDigest.getInstance("MD5"); - inputStream = new DigestInputStream(is, messageDigest); - } catch (NoSuchAlgorithmException impossible) { - // Every implementation of the Java platform is required to support MD5 (see MessageDigest) - throw new RuntimeException(impossible); - } - - PutObjectRequest putRequest = new PutObjectRequest(bucketName, blobName, inputStream, md) + PutObjectRequest putRequest = new PutObjectRequest(bucketName, blobName, is, md) .withStorageClass(blobStore.getStorageClass()) .withCannedAcl(blobStore.getCannedACL()); - PutObjectResult putObjectResult = blobStore.client().putObject(putRequest); + blobStore.client().putObject(putRequest); - String localMd5 = Base64.encodeAsString(messageDigest.digest()); - String remoteMd5 = putObjectResult.getContentMd5(); - if (!localMd5.equals(remoteMd5)) { - logger.debug("MD5 local [{}], remote [{}] are not equal...", localMd5, remoteMd5); - throw new AmazonS3Exception("MD5 local [" + localMd5 + - "], remote [" + remoteMd5 + - "] are not equal..."); - } } private void initializeMultipart() { diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java index 3f6ce26232b..64304879d0f 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java +++ 
b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java @@ -76,20 +76,13 @@ class MockAmazonS3 extends AbstractAmazonS3 { public PutObjectResult putObject(PutObjectRequest putObjectRequest) throws AmazonClientException, AmazonServiceException { String blobName = putObjectRequest.getKey(); - DigestInputStream stream = (DigestInputStream) putObjectRequest.getInputStream(); if (blobs.containsKey(blobName)) { throw new AmazonS3Exception("[" + blobName + "] already exists."); } - blobs.put(blobName, stream); - - // input and output md5 hashes need to match to avoid an exception - String md5 = Base64.encodeAsString(stream.getMessageDigest().digest()); - PutObjectResult result = new PutObjectResult(); - result.setContentMd5(md5); - - return result; + blobs.put(blobName, putObjectRequest.getInputStream()); + return new PutObjectResult(); } @Override diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java index 672a90c0411..c54cab44a01 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java @@ -80,7 +80,6 @@ public class EvilSecurityTests extends ESTestCase { Settings.Builder settingsBuilder = Settings.builder(); settingsBuilder.put(Environment.PATH_HOME_SETTING.getKey(), esHome.resolve("home").toString()); - settingsBuilder.put(Environment.PATH_CONF_SETTING.getKey(), esHome.resolve("conf").toString()); settingsBuilder.putArray(Environment.PATH_DATA_SETTING.getKey(), esHome.resolve("data1").toString(), esHome.resolve("data2").toString()); settingsBuilder.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), esHome.resolve("custom").toString()); @@ -94,7 +93,7 @@ public class EvilSecurityTests extends ESTestCase { Environment environment; try { System.setProperty("java.io.tmpdir", fakeTmpDir.toString()); - environment = new Environment(settings); + environment = new Environment(settings, esHome.resolve("conf")); permissions = Security.createPermissions(environment); } finally { System.setProperty("java.io.tmpdir", realTmpDir); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java index 8dd1fb06136..f53c9d3b1f5 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerConfigurationTests.java @@ -62,10 +62,9 @@ public class EvilLoggerConfigurationTests extends ESTestCase { try { final Path configDir = getDataPath("config"); final Settings settings = Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - final Environment environment = new Environment(settings); + final Environment environment = new Environment(settings, configDir); LogConfigurator.configure(environment); { @@ -100,11 +99,10 @@ public class EvilLoggerConfigurationTests extends ESTestCase { final Path configDir = getDataPath("config"); final String level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR).toString(); final Settings settings = Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) 
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put("logger.level", level) .build(); - final Environment environment = new Environment(settings); + final Environment environment = new Environment(settings, configDir); LogConfigurator.configure(environment); final String loggerName = "test"; @@ -116,11 +114,10 @@ public class EvilLoggerConfigurationTests extends ESTestCase { public void testResolveOrder() throws Exception { final Path configDir = getDataPath("config"); final Settings settings = Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put("logger.test_resolve_order", "TRACE") .build(); - final Environment environment = new Environment(settings); + final Environment environment = new Environment(settings, configDir); LogConfigurator.configure(environment); // args should overwrite whatever is in the config @@ -132,10 +129,9 @@ public class EvilLoggerConfigurationTests extends ESTestCase { public void testHierarchy() throws Exception { final Path configDir = getDataPath("hierarchy"); final Settings settings = Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - final Environment environment = new Environment(settings); + final Environment environment = new Environment(settings, configDir); LogConfigurator.configure(environment); assertThat(ESLoggerFactory.getLogger("x").getLevel(), equalTo(Level.TRACE)); @@ -151,10 +147,9 @@ public class EvilLoggerConfigurationTests extends ESTestCase { public void testMissingConfigFile() { final Path configDir = getDataPath("does_not_exist"); final Settings settings = Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - final Environment environment = new Environment(settings); + final Environment environment = new Environment(settings, configDir); UserException e = expectThrows(UserException.class, () -> LogConfigurator.configure(environment)); assertThat(e, hasToString(containsString("no log4j2.properties found; tried"))); } @@ -165,13 +160,12 @@ public class EvilLoggerConfigurationTests extends ESTestCase { final Level barLevel = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR); final Path configDir = getDataPath("minimal"); final Settings settings = Settings.builder() - .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put("logger.level", rootLevel.name()) .put("logger.foo", fooLevel.name()) .put("logger.bar", barLevel.name()) .build(); - final Environment environment = new Environment(settings); + final Environment environment = new Environment(settings, configDir); LogConfigurator.configure(environment); final LoggerContext ctx = (LoggerContext) LogManager.getContext(false); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index 10293b3b800..e4bf0fde7a3 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -194,16 +194,14 @@ public class EvilLoggerTests extends ESTestCase { } 
private void setupLogging(final String config, final Settings settings) throws IOException, UserException { - assert !Environment.PATH_CONF_SETTING.exists(settings); assert !Environment.PATH_HOME_SETTING.exists(settings); final Path configDir = getDataPath(config); - // need to set custom path.conf so we can use a custom log4j2.properties file for the test final Settings mergedSettings = Settings.builder() .put(settings) - .put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - final Environment environment = new Environment(mergedSettings); + // need to use custom config path so we can use a custom log4j2.properties file for the test + final Environment environment = new Environment(mergedSettings, configDir); LogConfigurator.configure(environment); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/node/EvilNodeTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/node/EvilNodeTests.java deleted file mode 100644 index 341d1227926..00000000000 --- a/qa/evil-tests/src/test/java/org/elasticsearch/node/EvilNodeTests.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.node; - -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.Constants; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verifyNoMoreInteractions; - -public class EvilNodeTests extends ESTestCase { - - public void testDefaultPathDataIncludedInPathData() throws IOException { - final Path zero = createTempDir().toAbsolutePath(); - final Path one = createTempDir().toAbsolutePath(); - // creating hard links to directories is okay on macOS so we exercise it here - final int random; - if (Constants.MAC_OS_X) { - random = randomFrom(0, 1, 2); - } else { - random = randomFrom(0, 1); - } - final Path defaultPathData; - final Path choice = randomFrom(zero, one); - switch (random) { - case 0: - defaultPathData = choice; - break; - case 1: - defaultPathData = createTempDir().toAbsolutePath().resolve("link"); - Files.createSymbolicLink(defaultPathData, choice); - break; - case 2: - defaultPathData = createTempDir().toAbsolutePath().resolve("link"); - Files.createLink(defaultPathData, choice); - break; - default: - throw new AssertionError(Integer.toString(random)); - } - final Settings settings = Settings.builder() - .put("path.home", createTempDir().toAbsolutePath()) - .put("path.data.0", zero) - .put("path.data.1", one) - .put("default.path.data", defaultPathData) - .build(); - try (NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings))) { - final Path defaultPathDataWithNodesAndId = defaultPathData.resolve("nodes/0"); - Files.createDirectories(defaultPathDataWithNodesAndId); - final NodeEnvironment.NodePath defaultNodePath = new NodeEnvironment.NodePath(defaultPathDataWithNodesAndId); - Files.createDirectories(defaultNodePath.indicesPath.resolve(UUIDs.randomBase64UUID())); - final Logger mock = mock(Logger.class); - // nothing should happen here - Node.checkForIndexDataInDefaultPathData(settings, nodeEnv, mock); - verifyNoMoreInteractions(mock); - } - } - -} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index ca2575901bc..adff57f517d 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -58,7 +58,6 @@ public class TribeUnitTests extends ESTestCase { private static Node tribe1; private static Node tribe2; - @BeforeClass public static void createTribes() throws NodeValidationException { Settings baseSettings = Settings.builder() @@ -93,24 +92,22 @@ public class TribeUnitTests extends ESTestCase { } public void testThatTribeClientsIgnoreGlobalConfig() throws Exception { - Path pathConf = getDataPath("elasticsearch.yml").getParent(); - Settings settings = Settings - .builder() - .put(Environment.PATH_CONF_SETTING.getKey(), pathConf) - .build(); - assertTribeNodeSuccessfullyCreated(settings); + assertTribeNodeSuccessfullyCreated(getDataPath("elasticsearch.yml").getParent()); + assertWarnings("tribe nodes are deprecated in favor of cross-cluster search and will be removed in Elasticsearch 7.0.0"); } - private static void assertTribeNodeSuccessfullyCreated(Settings 
extraSettings) throws Exception { - //The tribe clients do need it to make sure they can find their corresponding tribes using the proper transport + private static void assertTribeNodeSuccessfullyCreated(Path configPath) throws Exception { + // the tribe clients do need it to make sure they can find their corresponding tribes using the proper transport Settings settings = Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false).put("node.name", "tribe_node") .put("transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).put("discovery.type", "local") .put("tribe.t1.transport.type", MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .put("tribe.t2.transport.type",MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(extraSettings).build(); + .build(); - try (Node node = new MockNode(settings, Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class)).start()) { + final List<Class<? extends Plugin>> classpathPlugins = + Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class); + try (Node node = new MockNode(settings, classpathPlugins, configPath).start()) { try (Client client = node.client()) { assertBusy(() -> { ClusterState state = client.admin().cluster().prepareState().clear().setNodes(true).get().getState(); @@ -124,4 +121,5 @@ public class TribeUnitTests extends ESTestCase { } } } + } diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index 92378719573..8759cac4157 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -79,14 +79,18 @@ for (Version version : indexCompatVersions) { dependsOn = [upgradedClusterTest] } - bwcTest.dependsOn(versionBwcTest) + if (project.bwc_tests_enabled) { + bwcTest.dependsOn(versionBwcTest) + } } test.enabled = false // no unit tests for rolling upgrades, only the rest integration test // basic integ tests includes testing bwc against the most recent version task integTest { - dependsOn = ["v${indexCompatVersions[-1]}#bwcTest"] + if (project.bwc_tests_enabled) { + dependsOn = ["v${indexCompatVersions[-1]}#bwcTest"] + } } check.dependsOn(integTest) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 6a694e598c0..e60e255af93 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -19,7 +19,7 @@ package org.elasticsearch.upgrades; -import org.apache.http.ParseException; +import org.apache.http.HttpEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; @@ -31,20 +31,26 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.Before; import java.io.IOException; +import java.util.Base64; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; import static
java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.greaterThan; /** * Tests to run before and after a full cluster restart. This is run twice, @@ -54,24 +60,40 @@ import static org.hamcrest.Matchers.containsString; * with {@code tests.is_old_cluster} set to {@code false}. */ public class FullClusterRestartIT extends ESRestTestCase { - private static final String REPO = "/_snapshot/repo"; - private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); private final boolean supportsLenientBooleans = oldClusterVersion.onOrAfter(Version.V_6_0_0_alpha1); + private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0"); + + private String index; + + @Before + public void setIndex() { + index = getTestName().toLowerCase(Locale.ROOT); + } @Override protected boolean preserveIndicesUponCompletion() { return true; } + @Override + protected boolean preserveSnapshotsUponCompletion() { + return true; + } + @Override protected boolean preserveReposUponCompletion() { return true; } + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + public void testSearch() throws Exception { - String index = getTestName().toLowerCase(Locale.ROOT); + int count; if (runningAgainstOldCluster) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); @@ -95,6 +117,65 @@ public class FullClusterRestartIT extends ESRestTestCase { mappingsAndSettings.field("type", "text"); mappingsAndSettings.endObject(); } + { + mappingsAndSettings.startObject("binary"); + mappingsAndSettings.field("type", "binary"); + mappingsAndSettings.field("store", "true"); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + mappingsAndSettings.endObject(); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + client().performRequest("PUT", "/" + index, Collections.emptyMap(), + new StringEntity(mappingsAndSettings.string(), ContentType.APPLICATION_JSON)); + + count = randomIntBetween(2000, 3000); + byte[] randomByteArray = new byte[16]; + random().nextBytes(randomByteArray); + indexRandomDocuments(count, true, true, i -> { + return JsonXContent.contentBuilder().startObject() + .field("string", randomAlphaOfLength(10)) + .field("int", randomInt(100)) + .field("float", randomFloat()) + // be sure to create a "proper" boolean (True, False) for the first document so that automapping is correct + .field("bool", i > 0 && supportsLenientBooleans ? 
randomLenientBoolean() : randomBoolean()) + .field("field.with.dots", randomAlphaOfLength(10)) + .field("binary", Base64.getEncoder().encodeToString(randomByteArray)) + .endObject(); + }); + refresh(); + } else { + count = countOfIndexedRandomDocuments(); + } + assertBasicSearchWorks(count); + assertAllSearchWorks(count); + assertBasicAggregationWorks(); + assertRealtimeGetWorks(); + assertUpgradeWorks(); + assertStoredBinaryFields(count); + } + + public void testNewReplicasWork() throws Exception { + if (runningAgainstOldCluster) { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + { + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("number_of_shards", 1); + mappingsAndSettings.field("number_of_replicas", 0); + mappingsAndSettings.endObject(); + } + { + mappingsAndSettings.startObject("mappings"); + mappingsAndSettings.startObject("doc"); + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("field"); + mappingsAndSettings.field("type", "text"); + mappingsAndSettings.endObject(); + } mappingsAndSettings.endObject(); mappingsAndSettings.endObject(); mappingsAndSettings.endObject(); @@ -104,59 +185,360 @@ public class FullClusterRestartIT extends ESRestTestCase { new StringEntity(mappingsAndSettings.string(), ContentType.APPLICATION_JSON)); int numDocs = randomIntBetween(2000, 3000); - indexRandomDocuments(index, numDocs, true, i -> { + indexRandomDocuments(numDocs, true, false, i -> { return JsonXContent.contentBuilder().startObject() - .field("string", randomAlphaOfLength(10)) - .field("int", randomInt(100)) - .field("float", randomFloat()) - // be sure to create a "proper" boolean (True, False) for the first document so that automapping is correct - .field("bool", i > 0 && supportsLenientBooleans ? 
randomLenientBoolean() : randomBoolean()) - .field("field.with.dots", randomAlphaOfLength(10)) - // TODO a binary field - .endObject(); + .field("field", "value") + .endObject(); }); logger.info("Refreshing [{}]", index); client().performRequest("POST", "/" + index + "/_refresh"); + } else { + final int numReplicas = 1; + final long startTime = System.currentTimeMillis(); + logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, index); + String requestBody = "{ \"index\": { \"number_of_replicas\" : " + numReplicas + " }}"; + Response response = client().performRequest("PUT", "/" + index + "/_settings", Collections.emptyMap(), + new StringEntity(requestBody, ContentType.APPLICATION_JSON)); + assertEquals(200, response.getStatusLine().getStatusCode()); + + Map<String, String> params = new HashMap<>(); + params.put("timeout", "2m"); + params.put("wait_for_status", "green"); + params.put("wait_for_no_relocating_shards", "true"); + params.put("wait_for_events", "languid"); + Map<String, Object> healthRsp = toMap(client().performRequest("GET", "/_cluster/health/" + index, params)); + assertEquals("green", healthRsp.get("status")); + assertFalse((Boolean) healthRsp.get("timed_out")); + + logger.debug("--> index [{}] is green, took [{}] ms", index, (System.currentTimeMillis() - startTime)); + Map<String, Object> recoverRsp = toMap(client().performRequest("GET", "/" + index + "/_recovery")); + logger.debug("--> recovery status:\n{}", recoverRsp); + + Map<String, Object> responseBody = toMap(client().performRequest("GET", "/" + index + "/_search", + Collections.singletonMap("preference", "_primary"))); + assertNoFailures(responseBody); + int foundHits1 = (int) XContentMapValues.extractValue("hits.total", responseBody); + + responseBody = toMap(client().performRequest("GET", "/" + index + "/_search", + Collections.singletonMap("preference", "_replica"))); + assertNoFailures(responseBody); + int foundHits2 = (int) XContentMapValues.extractValue("hits.total", responseBody); + assertEquals(foundHits1, foundHits2); + // TODO: do something more with the replicas! index? } - assertBasicSearchWorks(index); } - void assertBasicSearchWorks(String index) throws IOException { + /** + * Search on an alias that contains illegal characters that would prevent it from being created after 5.1.0. It should still be + * search-able though.
+ */ + public void testAliasWithBadName() throws Exception { + assumeTrue("Can only test bad alias name if old cluster is on 5.1.0 or before", + oldClusterVersion.before(VERSION_5_1_0_UNRELEASED)); + + int count; + if (runningAgainstOldCluster) { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + { + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("number_of_shards", 1); + mappingsAndSettings.field("number_of_replicas", 0); + mappingsAndSettings.endObject(); + } + { + mappingsAndSettings.startObject("mappings"); + mappingsAndSettings.startObject("doc"); + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("key"); + mappingsAndSettings.field("type", "keyword"); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + mappingsAndSettings.endObject(); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + client().performRequest("PUT", "/" + index, Collections.emptyMap(), + new StringEntity(mappingsAndSettings.string(), ContentType.APPLICATION_JSON)); + + String aliasName = "%23" + index; // %23 == # + client().performRequest("PUT", "/" + index + "/_alias/" + aliasName); + Response response = client().performRequest("HEAD", "/" + index + "/_alias/" + aliasName); + assertEquals(200, response.getStatusLine().getStatusCode()); + + count = randomIntBetween(32, 128); + indexRandomDocuments(count, true, true, i -> { + return JsonXContent.contentBuilder().startObject() + .field("key", "value") + .endObject(); + }); + refresh(); + } else { + count = countOfIndexedRandomDocuments(); + } + + logger.error("clusterState=" + toMap(client().performRequest("GET", "/_cluster/state", + Collections.singletonMap("metric", "metadata")))); + // We can read from the alias just like we can read from the index. + String aliasName = "%23" + index; // %23 == # + Map searchRsp = toMap(client().performRequest("GET", "/" + aliasName + "/_search")); + int totalHits = (int) XContentMapValues.extractValue("hits.total", searchRsp); + assertEquals(count, totalHits); + if (runningAgainstOldCluster == false) { + // We can remove the alias. 
+ Response response = client().performRequest("DELETE", "/" + index + "/_alias/" + aliasName); + assertEquals(200, response.getStatusLine().getStatusCode()); + // and check that it is gone: + response = client().performRequest("HEAD", "/" + index + "/_alias/" + aliasName); + assertEquals(404, response.getStatusLine().getStatusCode()); + } + } + + public void testClusterState() throws Exception { + if (runningAgainstOldCluster) { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + mappingsAndSettings.field("template", index); + { + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("number_of_shards", 1); + mappingsAndSettings.field("number_of_replicas", 0); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + client().performRequest("PUT", "/_template/template_1", Collections.emptyMap(), + new StringEntity(mappingsAndSettings.string(), ContentType.APPLICATION_JSON)); + client().performRequest("PUT", "/" + index); + } + + // verifying if we can still read some properties from cluster state api: + Map clusterState = toMap(client().performRequest("GET", "/_cluster/state")); + + // Check some global properties: + String clusterName = (String) clusterState.get("cluster_name"); + assertEquals("full-cluster-restart", clusterName); + String numberOfShards = (String) XContentMapValues.extractValue( + "metadata.templates.template_1.settings.index.number_of_shards", clusterState); + assertEquals("1", numberOfShards); + String numberOfReplicas = (String) XContentMapValues.extractValue( + "metadata.templates.template_1.settings.index.number_of_replicas", clusterState); + assertEquals("0", numberOfReplicas); + + // Check some index properties: + numberOfShards = (String) XContentMapValues.extractValue("metadata.indices." + index + + ".settings.index.number_of_shards", clusterState); + assertEquals("1", numberOfShards); + numberOfReplicas = (String) XContentMapValues.extractValue("metadata.indices." + index + + ".settings.index.number_of_replicas", clusterState); + assertEquals("0", numberOfReplicas); + Version version = Version.fromId(Integer.valueOf((String) XContentMapValues.extractValue("metadata.indices." 
+ index + + ".settings.index.version.created", clusterState))); + assertEquals(oldClusterVersion, version); + + } + + void assertBasicSearchWorks(int count) throws IOException { logger.info("--> testing basic search"); Map response = toMap(client().performRequest("GET", "/" + index + "/_search")); assertNoFailures(response); - int numDocs1 = (int) XContentMapValues.extractValue("hits.total", response); - logger.info("Found {} in old index", numDocs1); + int numDocs = (int) XContentMapValues.extractValue("hits.total", response); + logger.info("Found {} in old index", numDocs); + assertEquals(count, numDocs); logger.info("--> testing basic search with sort"); String searchRequestBody = "{ \"sort\": [{ \"int\" : \"asc\" }]}"; response = toMap(client().performRequest("GET", "/" + index + "/_search", Collections.emptyMap(), new StringEntity(searchRequestBody, ContentType.APPLICATION_JSON))); assertNoFailures(response); - int numDocs2 = (int) XContentMapValues.extractValue("hits.total", response); - assertEquals(numDocs1, numDocs2); + numDocs = (int) XContentMapValues.extractValue("hits.total", response); + assertEquals(count, numDocs); logger.info("--> testing exists filter"); searchRequestBody = "{ \"query\": { \"exists\" : {\"field\": \"string\"} }}"; response = toMap(client().performRequest("GET", "/" + index + "/_search", Collections.emptyMap(), new StringEntity(searchRequestBody, ContentType.APPLICATION_JSON))); assertNoFailures(response); - numDocs2 = (int) XContentMapValues.extractValue("hits.total", response); - assertEquals(numDocs1, numDocs2); + numDocs = (int) XContentMapValues.extractValue("hits.total", response); + assertEquals(count, numDocs); searchRequestBody = "{ \"query\": { \"exists\" : {\"field\": \"field.with.dots\"} }}"; response = toMap(client().performRequest("GET", "/" + index + "/_search", Collections.emptyMap(), new StringEntity(searchRequestBody, ContentType.APPLICATION_JSON))); assertNoFailures(response); - numDocs2 = (int) XContentMapValues.extractValue("hits.total", response); - assertEquals(numDocs1, numDocs2); + numDocs = (int) XContentMapValues.extractValue("hits.total", response); + assertEquals(count, numDocs); + } + + void assertAllSearchWorks(int count) throws IOException { + logger.info("--> testing _all search"); + Map searchRsp = toMap(client().performRequest("GET", "/" + index + "/_search")); + assertNoFailures(searchRsp); + int totalHits = (int) XContentMapValues.extractValue("hits.total", searchRsp); + assertEquals(count, totalHits); + Map bestHit = (Map) ((List)(XContentMapValues.extractValue("hits.hits", searchRsp))).get(0); + + // Make sure there are payloads and they are taken into account for the score + // the 'string' field has a boost of 4 in the mappings so it should get a payload boost + String stringValue = (String) XContentMapValues.extractValue("_source.string", bestHit); + assertNotNull(stringValue); + String type = (String) bestHit.get("_type"); + String id = (String) bestHit.get("_id"); + String requestBody = "{ \"query\": { \"match_all\" : {} }}"; + String explanation = toStr(client().performRequest("GET", "/" + index + "/" + type + "/" + id, + Collections.emptyMap(), new StringEntity(requestBody, ContentType.APPLICATION_JSON))); + assertFalse("Could not find payload boost in explanation\n" + explanation, explanation.contains("payloadBoost")); + + // Make sure the query can run on the whole index + searchRsp = toMap(client().performRequest("GET", "/" + index + "/_search", + Collections.singletonMap("explain", "true"), new 
StringEntity(requestBody, ContentType.APPLICATION_JSON))); + assertNoFailures(searchRsp); + totalHits = (int) XContentMapValues.extractValue("hits.total", searchRsp); + assertEquals(count, totalHits); + } + + void assertBasicAggregationWorks() throws IOException { + // histogram on a long + String requestBody = "{ \"aggs\": { \"histo\" : {\"histogram\" : {\"field\": \"int\", \"interval\": 10}} }}"; + Map searchRsp = toMap(client().performRequest("GET", "/" + index + "/_search", Collections.emptyMap(), + new StringEntity(requestBody, ContentType.APPLICATION_JSON))); + assertNoFailures(searchRsp); + List histoBuckets = (List) XContentMapValues.extractValue("aggregations.histo.buckets", searchRsp); + long totalCount = 0; + for (Object entry : histoBuckets) { + Map bucket = (Map) entry; + totalCount += (Integer) bucket.get("doc_count"); + } + int totalHits = (int) XContentMapValues.extractValue("hits.total", searchRsp); + assertEquals(totalHits, totalCount); + + // terms on a boolean + requestBody = "{ \"aggs\": { \"bool_terms\" : {\"terms\" : {\"field\": \"bool\"}} }}"; + searchRsp = toMap(client().performRequest("GET", "/" + index + "/_search", Collections.emptyMap(), + new StringEntity(requestBody, ContentType.APPLICATION_JSON))); + List termsBuckets = (List) XContentMapValues.extractValue("aggregations.bool_terms.buckets", searchRsp); + totalCount = 0; + for (Object entry : termsBuckets) { + Map bucket = (Map) entry; + totalCount += (Integer) bucket.get("doc_count"); + } + totalHits = (int) XContentMapValues.extractValue("hits.total", searchRsp); + assertEquals(totalHits, totalCount); + } + + void assertRealtimeGetWorks() throws IOException { + String requestBody = "{ \"index\": { \"refresh_interval\" : -1 }}"; + Response response = client().performRequest("PUT", "/" + index + "/_settings", Collections.emptyMap(), + new StringEntity(requestBody, ContentType.APPLICATION_JSON)); + assertEquals(200, response.getStatusLine().getStatusCode()); + + requestBody = "{ \"query\": { \"match_all\" : {} }}"; + Map searchRsp = toMap(client().performRequest("GET", "/" + index + "/_search", Collections.emptyMap(), + new StringEntity(requestBody, ContentType.APPLICATION_JSON))); + Map hit = (Map) ((List)(XContentMapValues.extractValue("hits.hits", searchRsp))).get(0); + String docId = (String) hit.get("_id"); + + requestBody = "{ \"doc\" : { \"foo\": \"bar\"}}"; + response = client().performRequest("POST", "/" + index + "/doc/" + docId + "/_update", Collections.emptyMap(), + new StringEntity(requestBody, ContentType.APPLICATION_JSON)); + assertEquals(200, response.getStatusLine().getStatusCode()); + + Map getRsp = toMap(client().performRequest("GET", "/" + index + "/doc/" + docId)); + Map source = (Map) getRsp.get("_source"); + assertTrue("doc does not contain 'foo' key: " + source, source.containsKey("foo")); + + requestBody = "{ \"index\": { \"refresh_interval\" : \"1s\" }}"; + response = client().performRequest("PUT", "/" + index + "/_settings", Collections.emptyMap(), + new StringEntity(requestBody, ContentType.APPLICATION_JSON)); + assertEquals(200, response.getStatusLine().getStatusCode()); + } + + void assertUpgradeWorks() throws Exception { + if (runningAgainstOldCluster) { + Map rsp = toMap(client().performRequest("GET", "/_upgrade")); + Map indexUpgradeStatus = (Map) XContentMapValues.extractValue("indices." 
+ index, rsp); + int totalBytes = (Integer) indexUpgradeStatus.get("size_in_bytes"); + assertThat(totalBytes, greaterThan(0)); + int toUpgradeBytes = (Integer) indexUpgradeStatus.get("size_to_upgrade_in_bytes"); + assertEquals(0, toUpgradeBytes); + } else { + // Pre upgrade checks: + Map rsp = toMap(client().performRequest("GET", "/_upgrade")); + Map indexUpgradeStatus = (Map) XContentMapValues.extractValue("indices." + index, rsp); + int totalBytes = (Integer) indexUpgradeStatus.get("size_in_bytes"); + assertThat(totalBytes, greaterThan(0)); + int toUpgradeBytes = (Integer) indexUpgradeStatus.get("size_to_upgrade_in_bytes"); + assertThat(toUpgradeBytes, greaterThan(0)); + + // Upgrade segments: + Response r = client().performRequest("POST", "/" + index + "/_upgrade"); + assertEquals(200, r.getStatusLine().getStatusCode()); + + // Post upgrade checks: + assertBusy(() -> { + Map rsp2 = toMap(client().performRequest("GET", "/_upgrade")); + logger.info("upgrade status response: {}", rsp2); + Map indexUpgradeStatus2 = (Map) XContentMapValues.extractValue("indices." + index, rsp2); + assertNotNull(indexUpgradeStatus2); + int totalBytes2 = (Integer) indexUpgradeStatus2.get("size_in_bytes"); + assertThat(totalBytes2, greaterThan(0)); + int toUpgradeBytes2 = (Integer) indexUpgradeStatus2.get("size_to_upgrade_in_bytes"); + assertEquals(0, toUpgradeBytes2); + }); + + rsp = toMap(client().performRequest("GET", "/" + index + "/_segments")); + Map shards = (Map) XContentMapValues.extractValue("indices." + index + ".shards", rsp); + for (Object shard : shards.values()) { + List shardSegments = (List) shard; + for (Object shardSegment : shardSegments) { + Map shardSegmentRsp = (Map) shardSegment; + Map segments = (Map) shardSegmentRsp.get("segments"); + for (Object segment : segments.values()) { + Map segmentRsp = (Map) segment; + org.apache.lucene.util.Version luceneVersion = + org.apache.lucene.util.Version.parse((String) segmentRsp.get("version")); + assertEquals("Un-upgraded segment " + segment, Version.CURRENT.luceneVersion.major, luceneVersion.major); + assertEquals("Un-upgraded segment " + segment, Version.CURRENT.luceneVersion.minor, luceneVersion.minor); + assertEquals("Un-upgraded segment " + segment, Version.CURRENT.luceneVersion.bugfix, luceneVersion.bugfix); + } + } + } + } + } + + void assertStoredBinaryFields(int count) throws Exception { + String requestBody = "{ \"query\": { \"match_all\" : {} }, \"size\": 100, \"stored_fields\": \"binary\"}"; + Map rsp = toMap(client().performRequest("GET", "/" + index + "/_search", + Collections.emptyMap(), new StringEntity(requestBody, ContentType.APPLICATION_JSON))); + + int totalCount = (Integer) XContentMapValues.extractValue("hits.total", rsp); + assertEquals(count, totalCount); + List hits = (List) XContentMapValues.extractValue("hits.hits", rsp); + assertEquals(100, hits.size()); + for (Object hit : hits) { + Map hitRsp = (Map) hit; + List values = (List) XContentMapValues.extractValue("fields.binary", hitRsp); + assertEquals(1, values.size()); + String value = (String) values.get(0); + byte[] binaryValue = Base64.getDecoder().decode(value); + assertEquals("Unexpected string length [" + value + "]", 16, binaryValue.length); + } } static Map toMap(Response response) throws IOException { - return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); + return toMap(EntityUtils.toString(response.getEntity())); } - static void assertNoFailures(Map response) { + static Map toMap(String response) 
throws IOException { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false); + } + + static String toStr(Response response) throws IOException { + return EntityUtils.toString(response.getEntity()); + } + + static void assertNoFailures(Map response) { int failed = (int) XContentMapValues.extractValue("_shards.failed", response); assertEquals(0, failed); } @@ -165,7 +547,7 @@ public class FullClusterRestartIT extends ESRestTestCase { * Tests that a single document survives. Super basic smoke test. */ public void testSingleDoc() throws IOException { - String docLocation = "/" + getTestName().toLowerCase(Locale.ROOT) + "/doc/1"; + String docLocation = "/" + index + "/doc/1"; String doc = "{\"test\": \"test\"}"; if (runningAgainstOldCluster) { @@ -173,14 +555,14 @@ public class FullClusterRestartIT extends ESRestTestCase { new StringEntity(doc, ContentType.APPLICATION_JSON)); } - assertThat(EntityUtils.toString(client().performRequest("GET", docLocation).getEntity()), containsString(doc)); + assertThat(toStr(client().performRequest("GET", docLocation)), containsString(doc)); } - public void testRandomDocumentsAndSnapshot() throws IOException { - String testName = getTestName().toLowerCase(Locale.ROOT); - String index = testName + "_data"; - String infoDocument = "/" + testName + "_info/doc/info"; - + /** + * Tests recovery of an index with or without a translog and the + * statistics we gather about that. + */ + public void testRecovery() throws IOException { int count; boolean shouldHaveTranslog; if (runningAgainstOldCluster) { @@ -189,110 +571,51 @@ public class FullClusterRestartIT extends ESRestTestCase { * an index without a translog so we randomize whether * or not we have one. */ shouldHaveTranslog = randomBoolean(); - logger.info("Creating {} documents", count); - indexRandomDocuments(index, count, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); - createSnapshot(); + + indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); // Explicitly flush so we're sure to have a bunch of documents in the Lucene index client().performRequest("POST", "/_flush"); if (shouldHaveTranslog) { // Update a few documents so we are sure to have a translog - indexRandomDocuments(index, count / 10, false /* Flushing here would invalidate the whole thing....*/, + indexRandomDocuments(count / 10, false /* Flushing here would invalidate the whole thing....*/, false, i -> jsonBuilder().startObject().field("field", "value").endObject()); } - - // Record how many documents we built so we can compare later - XContentBuilder infoDoc = JsonXContent.contentBuilder().startObject(); - infoDoc.field("count", count); - infoDoc.field("should_have_translog", shouldHaveTranslog); - infoDoc.endObject(); - client().performRequest("PUT", infoDocument, singletonMap("refresh", "true"), - new StringEntity(infoDoc.string(), ContentType.APPLICATION_JSON)); + saveInfoDocument("should_have_translog", Boolean.toString(shouldHaveTranslog)); } else { - // Load the number of documents that were written to the old cluster - String doc = EntityUtils.toString( - client().performRequest("GET", infoDocument, singletonMap("filter_path", "_source")).getEntity()); - Matcher m = Pattern.compile("\"count\":(\\d+)").matcher(doc); - assertTrue(doc, m.find()); - count = Integer.parseInt(m.group(1)); - m = Pattern.compile("\"should_have_translog\":(true|false)").matcher(doc); - assertTrue(doc, m.find()); - shouldHaveTranslog = 
Booleans.parseBoolean(m.group(1)); + count = countOfIndexedRandomDocuments(); + shouldHaveTranslog = Booleans.parseBoolean(loadInfoDocument("should_have_translog")); } // Count the documents in the index to make sure we have as many as we put there - String countResponse = EntityUtils.toString( - client().performRequest("GET", "/" + index + "/_search", singletonMap("size", "0")).getEntity()); + String countResponse = toStr(client().performRequest("GET", "/" + index + "/_search", singletonMap("size", "0"))); assertThat(countResponse, containsString("\"total\":" + count)); if (false == runningAgainstOldCluster) { - assertTranslogRecoveryStatistics(index, shouldHaveTranslog); - } - - restoreSnapshot(index, count); - - // TODO finish adding tests for the things in OldIndexBackwardsCompatibilityIT - } - - // TODO tests for upgrades after shrink. We've had trouble with shrink in the past. - - private void indexRandomDocuments(String index, int count, boolean flushAllowed, - CheckedFunction docSupplier) throws IOException { - for (int i = 0; i < count; i++) { - logger.debug("Indexing document [{}]", i); - client().performRequest("POST", "/" + index + "/doc/" + i, emptyMap(), - new StringEntity(docSupplier.apply(i).string(), ContentType.APPLICATION_JSON)); - if (rarely()) { - logger.info("Refreshing [{}]", index); - client().performRequest("POST", "/" + index + "/_refresh"); + boolean restoredFromTranslog = false; + boolean foundPrimary = false; + Map params = new HashMap<>(); + params.put("h", "index,shard,type,stage,translog_ops_recovered"); + params.put("s", "index,shard,type"); + String recoveryResponse = toStr(client().performRequest("GET", "/_cat/recovery/" + index, params)); + for (String line : recoveryResponse.split("\n")) { + // Find the primaries + foundPrimary = true; + if (false == line.contains("done") && line.contains("existing_store")) { + continue; + } + /* Mark if we see a primary that looked like it restored from the translog. + * Not all primaries will look like this all the time because we modify + * random documents when we want there to be a translog and they might + * not be spread around all the shards. 
*/ + Matcher m = Pattern.compile("(\\d+)$").matcher(line); + assertTrue(line, m.find()); + int translogOps = Integer.parseInt(m.group(1)); + if (translogOps > 0) { + restoredFromTranslog = true; + } } - if (flushAllowed && rarely()) { - logger.info("Flushing [{}]", index); - client().performRequest("POST", "/" + index + "/_flush"); - } - } - } - - private void createSnapshot() throws IOException { - XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); { - repoConfig.field("type", "fs"); - repoConfig.startObject("settings"); { - repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); - } - repoConfig.endObject(); - } - repoConfig.endObject(); - client().performRequest("PUT", REPO, emptyMap(), new StringEntity(repoConfig.string(), ContentType.APPLICATION_JSON)); - - client().performRequest("PUT", REPO + "/snap", singletonMap("wait_for_completion", "true")); - } - - private void assertTranslogRecoveryStatistics(String index, boolean shouldHaveTranslog) throws ParseException, IOException { - boolean restoredFromTranslog = false; - boolean foundPrimary = false; - Map params = new HashMap<>(); - params.put("h", "index,shard,type,stage,translog_ops_recovered"); - params.put("s", "index,shard,type"); - String recoveryResponse = EntityUtils.toString(client().performRequest("GET", "/_cat/recovery/" + index, params).getEntity()); - for (String line : recoveryResponse.split("\n")) { - // Find the primaries - foundPrimary = true; - if (false == line.contains("done") && line.contains("existing_store")) { - continue; - } - /* Mark if we see a primary that looked like it restored from the translog. - * Not all primaries will look like this all the time because we modify - * random documents when we want there to be a translog and they might - * not be spread around all the shards. 
*/ - Matcher m = Pattern.compile("(\\d+)$").matcher(line); - assertTrue(line, m.find()); - int translogOps = Integer.parseInt(m.group(1)); - if (translogOps > 0) { - restoredFromTranslog = true; - } - } - assertTrue("expected to find a primary but didn't\n" + recoveryResponse, foundPrimary); - assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog); + assertTrue("expected to find a primary but didn't\n" + recoveryResponse, foundPrimary); + assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog); String currentLuceneVersion = Version.CURRENT.luceneVersion.toString(); String bwcLuceneVersion = oldClusterVersion.luceneVersion.toString(); @@ -302,8 +625,8 @@ public class FullClusterRestartIT extends ESRestTestCase { params.clear(); params.put("h", "prirep,shard,index,version"); params.put("s", "prirep,shard,index"); - String segmentsResponse = EntityUtils.toString( - client().performRequest("GET", "/_cat/segments/" + index, params).getEntity()); + String segmentsResponse = toStr( + client().performRequest("GET", "/_cat/segments/" + index, params)); for (String line : segmentsResponse.split("\n")) { if (false == line.startsWith("p")) { continue; @@ -320,38 +643,243 @@ public class FullClusterRestartIT extends ESRestTestCase { } } assertNotEquals("expected at least 1 current segment after translog recovery", 0, numCurrentVersion); - assertNotEquals("expected at least 1 old segment", 0, numBwcVersion); + assertNotEquals("expected at least 1 old segment", 0, numBwcVersion);} } } - private void restoreSnapshot(String index, int count) throws ParseException, IOException { - if (false == runningAgainstOldCluster) { - /* Remove any "restored" indices from the old cluster run of this test. - * We intentionally don't remove them while running this against the - * old cluster so we can test starting the node with a restored index - * in the cluster. */ - client().performRequest("DELETE", "/restored_*"); + /** + * Tests snapshot/restore by creating a snapshot and restoring it. It takes + * a snapshot on the old cluster and restores it on the old cluster as a + * sanity check and on the new cluster as an upgrade test. It also takes a + * snapshot on the new cluster and restores that on the new cluster as a + * test that the repository is ok with containing snapshot from both the + * old and new versions. All of the snapshots include an index, a template, + * and some routing configuration. 
+ */ + public void testSnapshotRestore() throws IOException { + int count; + if (runningAgainstOldCluster) { + // Create the index + count = between(200, 300); + indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); + } else { + count = countOfIndexedRandomDocuments(); } + // Refresh the index so the count doesn't fail + refresh(); + + // Count the documents in the index to make sure we have as many as we put there + String countResponse = toStr(client().performRequest("GET", "/" + index + "/_search", singletonMap("size", "0"))); + assertThat(countResponse, containsString("\"total\":" + count)); + + // Stick a routing attribute into to cluster settings so we can see it after the restore + HttpEntity routingSetting = new StringEntity( + "{\"persistent\": {\"cluster.routing.allocation.exclude.test_attr\": \"" + oldClusterVersion + "\"}}", + ContentType.APPLICATION_JSON); + client().performRequest("PUT", "/_cluster/settings", emptyMap(), routingSetting); + + // Stick a template into the cluster so we can see it after the restore + XContentBuilder templateBuilder = JsonXContent.contentBuilder().startObject(); + templateBuilder.field("template", "evil_*"); // Don't confuse other tests by applying the template + templateBuilder.startObject("settings"); { + templateBuilder.field("number_of_shards", 1); + } + templateBuilder.endObject(); + templateBuilder.startObject("mappings"); { + templateBuilder.startObject("doc"); { + templateBuilder.startObject("_source"); { + templateBuilder.field("enabled", true); + } + templateBuilder.endObject(); + } + templateBuilder.endObject(); + } + templateBuilder.endObject(); + templateBuilder.startObject("aliases"); { + templateBuilder.startObject("alias1").endObject(); + templateBuilder.startObject("alias2"); { + templateBuilder.startObject("filter"); { + templateBuilder.startObject("term"); { + templateBuilder.field("version", runningAgainstOldCluster ? oldClusterVersion : Version.CURRENT); + } + templateBuilder.endObject(); + } + templateBuilder.endObject(); + } + templateBuilder.endObject(); + } + templateBuilder.endObject().endObject(); + client().performRequest("PUT", "/_template/test_template", emptyMap(), + new StringEntity(templateBuilder.string(), ContentType.APPLICATION_JSON)); + if (runningAgainstOldCluster) { - // TODO restoring the snapshot seems to fail! This seems like a bug. 
- XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); - restoreCommand.field("include_global_state", false); - restoreCommand.field("indices", index); - restoreCommand.field("rename_pattern", index); - restoreCommand.field("rename_replacement", "restored_" + index); - restoreCommand.endObject(); - client().performRequest("POST", REPO + "/snap/_restore", singletonMap("wait_for_completion", "true"), - new StringEntity(restoreCommand.string(), ContentType.APPLICATION_JSON)); - - String countResponse = EntityUtils.toString( - client().performRequest("GET", "/restored_" + index + "/_search", singletonMap("size", "0")).getEntity()); - assertThat(countResponse, containsString("\"total\":" + count)); + // Create the repo + XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); { + repoConfig.field("type", "fs"); + repoConfig.startObject("settings"); { + repoConfig.field("compress", randomBoolean()); + repoConfig.field("location", System.getProperty("tests.path.repo")); + } + repoConfig.endObject(); + } + repoConfig.endObject(); + client().performRequest("PUT", "/_snapshot/repo", emptyMap(), + new StringEntity(repoConfig.string(), ContentType.APPLICATION_JSON)); } + client().performRequest("PUT", "/_snapshot/repo/" + (runningAgainstOldCluster ? "old_snap" : "new_snap"), + singletonMap("wait_for_completion", "true"), + new StringEntity("{\"indices\": \"" + index + "\"}", ContentType.APPLICATION_JSON)); + + checkSnapshot("old_snap", count, oldClusterVersion); + if (false == runningAgainstOldCluster) { + checkSnapshot("new_snap", count, Version.CURRENT); + } + } + + private void checkSnapshot(String snapshotName, int count, Version tookOnVersion) throws IOException { + // Check the snapshot metadata, especially the version + String response = toStr(client().performRequest("GET", "/_snapshot/repo/" + snapshotName, listSnapshotVerboseParams())); + Map map = toMap(response); + assertEquals(response, singletonList(snapshotName), XContentMapValues.extractValue("snapshots.snapshot", map)); + assertEquals(response, singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", map)); + assertEquals(response, singletonList(tookOnVersion.toString()), XContentMapValues.extractValue("snapshots.version", map)); + + // Remove the routing setting and template so we can test restoring them. 
+ HttpEntity clearRoutingSetting = new StringEntity( + "{\"persistent\":{\"cluster.routing.allocation.exclude.test_attr\": null}}", + ContentType.APPLICATION_JSON); + client().performRequest("PUT", "/_cluster/settings", emptyMap(), clearRoutingSetting); + client().performRequest("DELETE", "/_template/test_template", emptyMap(), clearRoutingSetting); + + // Restore + XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); + restoreCommand.field("include_global_state", true); + restoreCommand.field("indices", index); + restoreCommand.field("rename_pattern", index); + restoreCommand.field("rename_replacement", "restored_" + index); + restoreCommand.endObject(); + client().performRequest("POST", "/_snapshot/repo/" + snapshotName + "/_restore", singletonMap("wait_for_completion", "true"), + new StringEntity(restoreCommand.string(), ContentType.APPLICATION_JSON)); + + // Make sure search finds all documents + String countResponse = toStr(client().performRequest("GET", "/restored_" + index + "/_search", singletonMap("size", "0"))); + assertThat(countResponse, containsString("\"total\":" + count)); + + // Add some extra documents to the index to be sure we can still write to it after restoring it + int extras = between(1, 100); + StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < extras; i++) { + bulk.append("{\"index\":{\"_id\":\"").append(count + i).append("\"}}\n"); + bulk.append("{\"test\":\"test\"}\n"); + } + client().performRequest("POST", "/restored_" + index + "/doc/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + // And count to make sure the add worked + // Make sure search finds all documents + countResponse = toStr(client().performRequest("GET", "/restored_" + index + "/_search", singletonMap("size", "0"))); + assertThat(countResponse, containsString("\"total\":" + (count + extras))); + + // Clean up the index for the next iteration + client().performRequest("DELETE", "/restored_*"); + + // Check settings added by the restore process + map = toMap(client().performRequest("GET", "/_cluster/settings", singletonMap("flat_settings", "true"))); + Map expected = new HashMap<>(); + expected.put("transient", emptyMap()); + expected.put("persistent", singletonMap("cluster.routing.allocation.exclude.test_attr", oldClusterVersion.toString())); + if (expected.equals(map) == false) { + NotEqualMessageBuilder builder = new NotEqualMessageBuilder(); + builder.compareMaps(map, expected); + fail("settings don't match:\n" + builder.toString()); + } + + // Check that the template was restored successfully + map = toMap(client().performRequest("GET", "/_template/test_template")); + expected = new HashMap<>(); + if (runningAgainstOldCluster) { + expected.put("template", "evil_*"); + } else { + expected.put("index_patterns", singletonList("evil_*")); + } + expected.put("settings", singletonMap("index", singletonMap("number_of_shards", "1"))); + expected.put("mappings", singletonMap("doc", singletonMap("_source", singletonMap("enabled", true)))); + expected.put("order", 0); + Map aliases = new HashMap<>(); + aliases.put("alias1", emptyMap()); + aliases.put("alias2", singletonMap("filter", singletonMap("term", singletonMap("version", tookOnVersion.toString())))); + expected.put("aliases", aliases); + expected = singletonMap("test_template", expected); + if (false == expected.equals(map)) { + NotEqualMessageBuilder builder = new NotEqualMessageBuilder(); + builder.compareMaps(map, expected); + fail("template 
doesn't match:\n" + builder.toString()); + } + + } + + /** + * Parameters required to get the version of Elasticsearch that took the snapshot. + * On versions after 5.5 we need a {@code verbose} parameter. + */ + private Map listSnapshotVerboseParams() { + if (runningAgainstOldCluster && oldClusterVersion.before(Version.V_5_5_0)) { + return emptyMap(); + } + return singletonMap("verbose", "true"); + } + + // TODO tests for upgrades after shrink. We've had trouble with shrink in the past. + + private void indexRandomDocuments(int count, boolean flushAllowed, boolean saveInfo, + CheckedFunction docSupplier) throws IOException { + logger.info("Indexing {} random documents", count); + for (int i = 0; i < count; i++) { + logger.debug("Indexing document [{}]", i); + client().performRequest("POST", "/" + index + "/doc/" + i, emptyMap(), + new StringEntity(docSupplier.apply(i).string(), ContentType.APPLICATION_JSON)); + if (rarely()) { + refresh(); + } + if (flushAllowed && rarely()) { + logger.debug("Flushing [{}]", index); + client().performRequest("POST", "/" + index + "/_flush"); + } + } + if (saveInfo) { + saveInfoDocument("count", Integer.toString(count)); + } + } + + private int countOfIndexedRandomDocuments() throws IOException { + return Integer.parseInt(loadInfoDocument("count")); + } + + private void saveInfoDocument(String type, String value) throws IOException { + XContentBuilder infoDoc = JsonXContent.contentBuilder().startObject(); + infoDoc.field("value", value); + infoDoc.endObject(); + // Only create the first version so we know how many documents are created when the index is first created + Map params = singletonMap("op_type", "create"); + client().performRequest("PUT", "/info/doc/" + index + "_" + type, params, + new StringEntity(infoDoc.string(), ContentType.APPLICATION_JSON)); + } + + private String loadInfoDocument(String type) throws IOException { + String doc = toStr(client().performRequest("GET", "/info/doc/" + index + "_" + type, singletonMap("filter_path", "_source"))); + Matcher m = Pattern.compile("\"value\":\"(.+)\"").matcher(doc); + assertTrue(doc, m.find()); + return m.group(1); } private Object randomLenientBoolean() { return randomFrom(new Object[] {"off", "no", "0", 0, "false", false, "on", "yes", "1", 1, "true", true}); } + + private void refresh() throws IOException { + logger.debug("Refreshing [{}]", index); + client().performRequest("POST", "/" + index + "/_refresh"); + } } diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index a0f6b92e9e7..66185325931 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -51,14 +51,18 @@ for (Version version : wireCompatVersions) { dependsOn = [mixedClusterTest] } - bwcTest.dependsOn(versionBwcTest) + if (project.bwc_tests_enabled) { + bwcTest.dependsOn(versionBwcTest) + } } test.enabled = false // no unit tests for rolling upgrades, only the rest integration test // basic integ tests includes testing bwc against the most recent version task integTest { - dependsOn = ["v${wireCompatVersions[-1]}#bwcTest"] + if (project.bwc_tests_enabled) { + dependsOn = ["v${wireCompatVersions[-1]}#bwcTest"] + } } check.dependsOn(integTest) diff --git a/modules/parent-join/src/test/resources/rest-api-spec/test/10_parent_child.yml b/qa/mixed-cluster/src/test/resources/rest-api-spec/test/10_parent_child.yml similarity index 95% rename from modules/parent-join/src/test/resources/rest-api-spec/test/10_parent_child.yml rename to 
qa/mixed-cluster/src/test/resources/rest-api-spec/test/10_parent_child.yml index 29a50c64f28..fb0462acd1a 100644 --- a/modules/parent-join/src/test/resources/rest-api-spec/test/10_parent_child.yml +++ b/qa/mixed-cluster/src/test/resources/rest-api-spec/test/10_parent_child.yml @@ -3,8 +3,6 @@ setup: indices.create: index: test body: - settings: - mapping.single_type: false mappings: type_2: {} type_3: diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml new file mode 100644 index 00000000000..c40e7be1c64 --- /dev/null +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml @@ -0,0 +1,44 @@ +--- +"Search with missing remote index pattern": + - do: + catch: "request" + search: + index: "my_remote_cluster:foo" + + - do: + search: + index: "my_remote_cluster:fooo*" + - match: { _shards.total: 0 } + - match: { hits.total: 0 } + + - do: + search: + index: "*:foo*" + + - match: { _shards.total: 0 } + - match: { hits.total: 0 } + + - do: + search: + index: "my_remote_cluster:test_index,my_remote_cluster:foo*" + body: + aggs: + cluster: + terms: + field: f1.keyword + + - match: { _shards.total: 3 } + - match: { hits.total: 6 } + - length: { aggregations.cluster.buckets: 1 } + - match: { aggregations.cluster.buckets.0.key: "remote_cluster" } + - match: { aggregations.cluster.buckets.0.doc_count: 6 } + + - do: + catch: "request" + search: + index: "my_remote_cluster:test_index,my_remote_cluster:foo" + body: + aggs: + cluster: + terms: + field: f1.keyword diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 03cbf24bdcf..b5f84160130 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -95,17 +95,22 @@ for (Version version : wireCompatVersions) { } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { + enabled = project.bwc_tests_enabled dependsOn = [upgradedClusterTest] } - bwcTest.dependsOn(versionBwcTest) + if (project.bwc_tests_enabled) { + bwcTest.dependsOn(versionBwcTest) + } } test.enabled = false // no unit tests for rolling upgrades, only the rest integration test // basic integ tests includes testing bwc against the most recent version task integTest { - dependsOn = ["v${wireCompatVersions[-1]}#bwcTest"] + if (project.bwc_tests_enabled) { + dependsOn = ["v${wireCompatVersions[-1]}#bwcTest"] + } } check.dependsOn(integTest) diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java index 2977e783cf6..6dfdbb987cc 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -55,6 +55,7 @@ public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa // increase the timeout so that we can actually see the result of failed cluster health // calls that have a default timeout of 30s .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "40s") + .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "40s") .build(); } } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java index 948f573a05c..bedb11ecc93 100644 --- 
a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matcher; import java.io.IOException; @@ -54,6 +55,7 @@ import static org.hamcrest.Matchers.hasSize; /** * Tests {@code DeprecationLogger} uses the {@code ThreadContext} to add response headers. */ +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class DeprecationHttpIT extends HttpSmokeTestCase { @Override @@ -125,14 +127,6 @@ public class DeprecationHttpIT extends HttpSmokeTestCase { doTestDeprecationWarningsAppearInHeaders(); } - public void testDeprecationHeadersDoNotGetStuck() throws Exception { - doTestDeprecationWarningsAppearInHeaders(); - doTestDeprecationWarningsAppearInHeaders(); - if (rarely()) { - doTestDeprecationWarningsAppearInHeaders(); - } - } - /** * Run a request that receives a predictably randomized number of deprecation warnings. *

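
For readers reviewing the DeprecationHttpIT change above: the following is a minimal, illustrative sketch, not part of the patch, of how ESIntegTestCase.ClusterScope with Scope.TEST is typically applied. The class name and test body are assumptions made only for illustration; the annotation and base class are the ones used in the hunk above.

    package org.elasticsearch.http;

    import org.elasticsearch.test.ESIntegTestCase;

    // Hypothetical example (not part of this patch): Scope.TEST gives every test
    // method its own cluster, so state such as recorded deprecation-warning response
    // headers cannot leak from one test method into the next. That is why the
    // explicit "headers do not get stuck" test removed above becomes redundant.
    @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
    public class ExampleTestScopedClusterIT extends ESIntegTestCase {

        public void testEachMethodStartsFromAFreshCluster() {
            // Under Scope.TEST this cluster was formed for this method alone.
            ensureGreen();
        }
    }
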
diff --git a/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/reindex/10_script.yml b/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/reindex/10_script.yml index ba30ead5202..8fda091d80d 100644 --- a/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/reindex/10_script.yml +++ b/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/reindex/10_script.yml @@ -81,60 +81,6 @@ user: blort - match: { hits.total: 1 } ---- -"Add new parent": - - do: - indices.create: - index: new_twitter - body: - settings: - mapping.single_type: false - mappings: - tweet: - _parent: { type: "user" } - - - do: - index: - index: twitter - type: tweet - id: 1 - body: { "user": "kimchy" } - - do: - index: - index: new_twitter - type: user - id: kimchy - body: { "name": "kimchy" } - - do: - indices.refresh: {} - - - do: - reindex: - refresh: true - body: - source: - index: twitter - dest: - index: new_twitter - script: - lang: painless - source: ctx._parent = ctx._source.user - - match: {created: 1} - - match: {noops: 0} - - - do: - search: - index: new_twitter - body: - query: - has_parent: - parent_type: user - query: - match: - name: kimchy - - match: { hits.total: 1 } - - match: { hits.hits.0._source.user: kimchy } - --- "Add routing": - do: @@ -182,63 +128,6 @@ routing: foo - match: { _routing: foo } ---- -"Add routing and parent": - - do: - indices.create: - index: new_twitter - body: - settings: - mapping.single_type: false - mappings: - tweet: - _parent: { type: "user" } - - - do: - index: - index: twitter - type: tweet - id: 1 - body: { "user": "kimchy" } - - do: - index: - index: new_twitter - type: user - id: kimchy - body: { "name": "kimchy" } - routing: cat - - do: - indices.refresh: {} - - - do: - reindex: - refresh: true - body: - source: - index: twitter - dest: - index: new_twitter - script: - lang: painless - source: ctx._parent = ctx._source.user; ctx._routing = "cat" - - match: {created: 1} - - match: {noops: 0} - - - do: - search: - index: new_twitter - routing: cat - body: - query: - has_parent: - parent_type: user - query: - match: - name: kimchy - - match: { hits.total: 1 } - - match: { hits.hits.0._source.user: kimchy } - - match: { hits.hits.0._routing: cat } - --- "Noop one doc": - do: diff --git a/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/reindex/50_reindex_with_parent_join.yml b/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/reindex/50_reindex_with_parent_join.yml new file mode 100644 index 00000000000..496c13ec9b4 --- /dev/null +++ b/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/reindex/50_reindex_with_parent_join.yml @@ -0,0 +1,148 @@ +setup: + - do: + indices.create: + index: source + body: + mappings: + doc: + properties: + join_field: { "type": "join", "relations": { "parent": "child", "child": "grand_child" } } + + - do: + indices.create: + index: dest + body: + mappings: + doc: + properties: + join_field: { "type": "join", "relations": { "parent": "child", "child": "grand_child" } } + + - do: + index: + index: source + type: doc + id: 1 + body: { "join_field": { "name": "parent" } } + + - do: + index: + index: source + type: doc + id: 2 + routing: 1 + body: { "join_field": { "name": "child", "parent": "1" } } + + - do: + index: + index: source + type: doc + id: 3 + routing: 1 + body: { "join_field": { "name": "grand_child", "parent": "2" } } + + - do: + indices.refresh: {} + + +--- 
+"Reindex with parent join field": + - do: + reindex: + refresh: true + body: + source: + index: source + dest: + index: dest + - match: {created: 3} + + - do: + search: + index: dest + body: + query: + parent_id: + type: child + id: 1 + - match: {hits.total: 1} + - match: {hits.hits.0._id: "2"} + + - do: + search: + index: dest + body: + query: + has_parent: + parent_type: child + query: + parent_id: + type: child + id: 1 + - match: {hits.total: 1} + - match: {hits.hits.0._id: "3"} + + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} + + +--- +"Reindex from remote with parent join field": + - skip: + reason: Temporarily broken. See https://github.com/elastic/elasticsearch/issues/25363 + version: all + # Fetch the http host. We use the host of the master because we know there will always be a master. + - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + reindex: + refresh: true + body: + source: + remote: + host: http://${host} + index: source + dest: + index: dest + - match: {created: 3} + + - do: + search: + index: dest + body: + query: + parent_id: + type: child + id: 1 + - match: {hits.total: 1} + - match: {hits.hits.0._id: "2"} + + - do: + search: + index: dest + body: + query: + has_parent: + parent_type: child + query: + parent_id: + type: child + id: 1 + - match: {hits.total: 1} + - match: {hits.hits.0._id: "3"} + + # Make sure reindex closed all the scroll contexts + - do: + indices.stats: + index: source + metric: search + - match: {indices.source.total.search.open_contexts: 0} diff --git a/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/reindex/50_reindex_with_parentchild.yml b/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/reindex/50_reindex_with_parentchild.yml deleted file mode 100644 index 81e142c9195..00000000000 --- a/qa/smoke-test-reindex-with-all-modules/src/test/resources/rest-api-spec/test/reindex/50_reindex_with_parentchild.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- -"Reindex from remote with parent/child": - - do: - indices.create: - index: source - body: - settings: - mapping.single_type: false - mappings: - foo: {} - bar: - _parent: - type: foo - - do: - indices.create: - index: dest - body: - settings: - mapping.single_type: false - mappings: - foo: {} - bar: - _parent: - type: foo - - do: - index: - index: source - type: foo - id: 1 - body: { "text": "test" } - - do: - index: - index: source - type: bar - id: 1 - parent: 1 - body: { "text": "test2" } - - do: - indices.refresh: {} - - # Fetch the http host. We use the host of the master because we know there will always be a master. 
- - do: - cluster.state: {} - - set: { master_node: master } - - do: - nodes.info: - metric: [ http ] - - is_true: nodes.$master.http.publish_address - - set: {nodes.$master.http.publish_address: host} - - do: - reindex: - refresh: true - body: - source: - remote: - host: http://${host} - index: source - dest: - index: dest - - match: {created: 2} - - - do: - search: - index: dest - body: - query: - has_parent: - parent_type: foo - query: - match: - text: test - - match: {hits.total: 1} - - # Make sure reindex closed all the scroll contexts - - do: - indices.stats: - index: source - metric: search - - match: {indices.source.total.search.open_contexts: 0} diff --git a/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats index 0df802528b2..fcdb59f2fb7 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats @@ -118,27 +118,6 @@ setup() { [ "$status" -eq 3 ] || [ "$status" -eq 4 ] } -@test "[INIT.D] don't mkdir when it contains a comma" { - # Remove these just in case they exist beforehand - rm -rf /tmp/aoeu,/tmp/asdf - rm -rf /tmp/aoeu, - # set DATA_DIR to DATA_DIR=/tmp/aoeu,/tmp/asdf - sed -i 's/DATA_DIR=.*/DATA_DIR=\/tmp\/aoeu,\/tmp\/asdf/' /etc/init.d/elasticsearch - cat /etc/init.d/elasticsearch | grep "DATA_DIR" - run service elasticsearch start - if [ "$status" -ne 0 ]; then - cat /var/log/elasticsearch/* - fail - fi - wait_for_elasticsearch_status - assert_file_not_exist /tmp/aoeu,/tmp/asdf - assert_file_not_exist /tmp/aoeu, - service elasticsearch stop - run service elasticsearch status - # precise returns 4, trusty 3 - [ "$status" -eq 3 ] || [ "$status" -eq 4 ] -} - @test "[INIT.D] start Elasticsearch with custom JVM options" { assert_file_exist $ESENVFILE local es_java_opts=$ES_JAVA_OPTS diff --git a/qa/vagrant/src/test/resources/packaging/tests/75_bad_data_paths.bats b/qa/vagrant/src/test/resources/packaging/tests/75_bad_data_paths.bats deleted file mode 100644 index 59747bd6837..00000000000 --- a/qa/vagrant/src/test/resources/packaging/tests/75_bad_data_paths.bats +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env bats - -# Tests data.path settings which in the past have misbehaving, leaking the -# default.data.path setting into the data.path even when it doesn't belong. - -# WARNING: This testing file must be executed as root and can -# dramatically change your system. It should only be executed -# in a throw-away VM like those made by the Vagrantfile at -# the root of the Elasticsearch source code. This should -# cause the script to fail if it is executed any other way: -[ -f /etc/is_vagrant_vm ] || { - >&2 echo "must be run on a vagrant VM" - exit 1 -} - -# The test case can be executed with the Bash Automated -# Testing System tool available at https://github.com/sstephenson/bats -# Thanks to Sam Stephenson! - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Load test utilities -load $BATS_UTILS/packages.bash -load $BATS_UTILS/tar.bash -load $BATS_UTILS/utils.bash - -@test "[BAD data.path] install package" { - clean_before_test - skip_not_dpkg_or_rpm - install_package -} - -@test "[BAD data.path] setup funny path.data in package install" { - skip_not_dpkg_or_rpm - local temp=`mktemp -d` - chown elasticsearch:elasticsearch "$temp" - echo "path.data: [$temp]" > "/etc/elasticsearch/elasticsearch.yml" -} - -@test "[BAD data.path] start installed from package" { - skip_not_dpkg_or_rpm - start_elasticsearch_service green -} - -@test "[BAD data.path] check for bad dir after starting from package" { - skip_not_dpkg_or_rpm - assert_file_not_exist /var/lib/elasticsearch/nodes -} - -@test "[BAD data.path] install tar" { - clean_before_test - install_archive -} - -@test "[BAD data.path] setup funny path.data in tar install" { - local temp=`mktemp -d` - chown elasticsearch:elasticsearch "$temp" - echo "path.data: [$temp]" > "/tmp/elasticsearch/config/elasticsearch.yml" -} - -@test "[BAD data.path] start installed from tar" { - start_elasticsearch_service green "" "-Edefault.path.data=/tmp/elasticsearch/data" -} - -@test "[BAD data.path] check for bad dir after starting from tar" { - assert_file_not_exist "/tmp/elasticsearch/data/nodes" -} diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash index aee9f7e5060..d1c03a441f4 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash @@ -348,7 +348,7 @@ run_elasticsearch_service() { local CONF_DIR="" local ES_PATH_CONF="" else - local ES_PATH_CONF="-Epath.conf=$CONF_DIR" + local ES_PATH_CONF="--path.conf $CONF_DIR" fi # we must capture the exit code to compare so we don't want to start as background process in case we expect something other than 0 local background="" diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index d3b0f7f99cf..b501ffe168b 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -17,7 +17,9 @@ * under the License. 
*/ +import java.util.Locale import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.standalone-test' @@ -58,4 +60,28 @@ task integTest { dependsOn = ["v${indexCompatVersions[-1]}#bwcTest"] } -check.dependsOn(integTest) +task verifyDocsLuceneVersion { + doFirst { + File docsVersionsFile = rootProject.file('docs/Versions.asciidoc') + List versionLines = docsVersionsFile.readLines('UTF-8') + String docsLuceneVersion = null + for (String line : versionLines) { + if (line.startsWith(':lucene_version:')) { + docsLuceneVersion = line.split()[1] + } + } + if (docsLuceneVersion == null) { + throw new GradleException('Could not find lucene version in docs version file') + } + String expectedLuceneVersion = VersionProperties.lucene + if (expectedLuceneVersion.contains('-snapshot-')) { + expectedLuceneVersion = expectedLuceneVersion.substring(0, expectedLuceneVersion.lastIndexOf('-')) + expectedLuceneVersion = expectedLuceneVersion.toUpperCase(Locale.ROOT) + } + if (docsLuceneVersion != expectedLuceneVersion) { + throw new GradleException("Lucene version in docs [${docsLuceneVersion}] does not match version.properties [${expectedLuceneVersion}]") + } + } +} + +check.dependsOn integTest, verifyDocsLuceneVersion diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.refresh.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.refresh.json index 703fa73f8d3..a32974d017f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.refresh.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.refresh.json @@ -25,14 +25,6 @@ "options" : ["open","closed","none","all"], "default" : "open", "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "force": { - "type" : "boolean", - "description" : "Force a refresh even if not required", - "default": false - }, - "operation_threading": { - "description" : "TODO: ?" 
} } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml index f48f73cb478..e1225ef5da6 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml @@ -58,6 +58,38 @@ $body: | /^ http \n ((\d{1,3}\.){3}\d{1,3}:\d{1,5}\n)+ $/ +--- +"Additional disk information": + - skip: + version: " - 5.5.99" + reason: additional disk info added in 5.6.0 + + - do: + cat.nodes: + h: diskAvail,diskTotal,diskUsed,diskUsedPercent + v: true + + - match: + # leading whitespace on columns and optional whitespace on values is necessary + # because `diskAvail` is right aligned and text representation of disk size might be + # longer so it's padded with leading whitespace + $body: | + /^ \s* diskAvail \s+ diskTotal \s+ diskUsed \s+ diskUsedPercent \n + (\s* \d+(\.\d+)?[ptgmk]?b \s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+(\.\d+)?[ptgmk]?b\s+ (100\.00 | \d{1,2}\.\d{2}) \n)+ $/ + + - do: + cat.nodes: + h: disk,dt,du,dup + v: true + + - match: + # leading whitespace on columns and optional whitespace on values is necessary + # because `disk` is right aligned and text representation of disk size might be + # longer so it's padded with leading whitespace + $body: | + /^ \s* disk \s+ dt \s+ du \s+ dup \n + (\s* \d+(\.\d+)?[ptgmk]?b \s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+(\.\d+)?[ptgmk]?b\s+ (100\.00 | \d{1,2}\.\d{2}) \n)+ $/ + --- "Test cat nodes output with full_id set": - skip: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index 93ce5c8c807..358023c25af 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -58,6 +58,9 @@ --- "Custom filter in request": + - skip: + version: " - 5.99.99" + reason: token filter name changed in 6.0, so this needs to be skipped on mixed clusters - do: indices.analyze: body: @@ -73,5 +76,38 @@ - match: { detail.tokenizer.tokens.0.token: foo } - match: { detail.tokenizer.tokens.1.token: bar } - match: { detail.tokenizer.tokens.2.token: buzz } - - match: { detail.tokenfilters.0.name: "_anonymous_tokenfilter_[0]" } + - match: { detail.tokenfilters.0.name: "_anonymous_tokenfilter" } - match: { detail.tokenfilters.0.tokens.0.token: bar } + +--- +"Synonym filter with tokenizer": + - skip: + version: " - 5.99.99" + reason: to support synonym same analysis chain were added in 6.0.0 + - do: + indices.create: + index: test_synonym + body: + settings: + index: + analysis: + tokenizer: + trigram: + type: nGram + min_gram: 3 + max_gram: 3 + filter: + synonym: + type: synonym + synonyms: ["kimchy => shay"] + + - do: + indices.analyze: + index: test_synonym + body: + tokenizer: trigram + filter: [synonym] + text: kimchy + - length: { tokens: 2 } + - match: { tokens.0.token: sha } + - match: { tokens.1.token: hay } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_type/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_type/10_basic.yml index fb56ab4e4d6..278fd1ca8e7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_type/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_type/10_basic.yml @@ -2,14 +2,12 @@ "Exists 
type": - skip: version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards + reason: multiple types are not supported on 6.x indices onwards - do: indices.create: index: test_1 body: - settings: - mapping.single_type: false mappings: type_1: {} type_2: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml index b17f2512b66..90bb2747a7b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml @@ -4,21 +4,14 @@ setup: indices.create: index: test_1 body: - settings: - mapping.single_type: false mappings: - type_1: {} - type_2: {} + doc: {} - do: indices.create: index: test_2 body: - settings: - mapping.single_type: false mappings: - type_2: {} - type_3: {} - + doc: {} --- "Get /{index}/_mapping with empty mappings": @@ -35,191 +28,117 @@ setup: --- "Get /_mapping": - - skip: - version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards - - do: indices.get_mapping: {} - - is_true: test_1.mappings.type_1 - - is_true: test_1.mappings.type_2 - - is_true: test_2.mappings.type_2 - - is_true: test_2.mappings.type_3 + - is_true: test_1.mappings.doc + - is_true: test_2.mappings.doc --- "Get /{index}/_mapping": - - skip: - version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards - - do: indices.get_mapping: index: test_1 - - is_true: test_1.mappings.type_1 - - is_true: test_1.mappings.type_2 + - is_true: test_1.mappings.doc - is_false: test_2 --- "Get /{index}/_mapping/_all": - - skip: - version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards - - do: indices.get_mapping: index: test_1 type: _all - - is_true: test_1.mappings.type_1 - - is_true: test_1.mappings.type_2 + - is_true: test_1.mappings.doc - is_false: test_2 --- "Get /{index}/_mapping/*": - - skip: - version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards - - do: indices.get_mapping: index: test_1 type: '*' - - is_true: test_1.mappings.type_1 - - is_true: test_1.mappings.type_2 + - is_true: test_1.mappings.doc - is_false: test_2 --- "Get /{index}/_mapping/{type}": - - skip: - version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards - - do: indices.get_mapping: index: test_1 - type: type_1 + type: doc - - is_false: test_1.mappings.type_2 - - is_false: test_2 - ---- -"Get /{index}/_mapping/{type,type}": - - - skip: - version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards - - - do: - indices.get_mapping: - index: test_1 - type: type_1,type_2 - - - is_true: test_1.mappings.type_1 - - is_true: test_1.mappings.type_2 + - is_true: test_1.mappings.doc - is_false: test_2 --- "Get /{index}/_mapping/{type*}": - - skip: 
- version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards - - do: indices.get_mapping: index: test_1 - type: '*2' + type: 'd*' - - is_true: test_1.mappings.type_2 - - is_false: test_1.mappings.type_1 + - is_true: test_1.mappings.doc - is_false: test_2 --- "Get /_mapping/{type}": - - skip: - version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards - - do: indices.get_mapping: - type: type_2 + type: doc - - is_true: test_1.mappings.type_2 - - is_true: test_2.mappings.type_2 - - is_false: test_1.mappings.type_1 - - is_false: test_2.mappings.type_3 + - is_true: test_1.mappings.doc + - is_true: test_2.mappings.doc --- "Get /_all/_mapping/{type}": - - skip: - version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards - - do: indices.get_mapping: index: _all - type: type_2 + type: doc - - is_true: test_1.mappings.type_2 - - is_true: test_2.mappings.type_2 - - is_false: test_1.mappings.type_1 - - is_false: test_2.mappings.type_3 + - is_true: test_1.mappings.doc + - is_true: test_2.mappings.doc --- "Get /*/_mapping/{type}": - - skip: - version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards - - do: indices.get_mapping: index: '*' - type: type_2 + type: doc - - is_true: test_1.mappings.type_2 - - is_true: test_2.mappings.type_2 - - is_false: test_1.mappings.type_1 - - is_false: test_2.mappings.type_3 + - is_true: test_1.mappings.doc + - is_true: test_2.mappings.doc --- "Get /index,index/_mapping/{type}": - - skip: - version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards - - do: indices.get_mapping: index: test_1,test_2 - type: type_2 + type: doc - - is_true: test_1.mappings.type_2 - - is_true: test_2.mappings.type_2 - - is_false: test_2.mappings.type_3 + - is_true: test_1.mappings.doc + - is_true: test_2.mappings.doc --- "Get /index*/_mapping/{type}": - - skip: - version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards - - do: indices.get_mapping: index: '*2' - type: type_2 + type: doc - - is_true: test_2.mappings.type_2 + - is_true: test_2.mappings.doc - is_false: test_1 - - is_false: test_2.mappings.type_3 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_legacy_multi_type.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_legacy_multi_type.yml new file mode 100644 index 00000000000..9b36ac15357 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_legacy_multi_type.yml @@ -0,0 +1,208 @@ +--- +setup: + - do: + indices.create: + index: test_1 + body: + mappings: + type_1: {} + type_2: {} + - do: + indices.create: + index: test_2 + body: + mappings: + type_2: {} + type_3: {} + +--- +"Get /_mapping": + + - skip: + version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node + reason: multiple types are not supported on 6.x indices onwards + + - do: + indices.get_mapping: {} + + - is_true: 
test_1.mappings.type_1 + - is_true: test_1.mappings.type_2 + - is_true: test_2.mappings.type_2 + - is_true: test_2.mappings.type_3 + +--- +"Get /{index}/_mapping": + + - skip: + version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node + reason: multiple types are not supported on 6.x indices onwards + + - do: + indices.get_mapping: + index: test_1 + + - is_true: test_1.mappings.type_1 + - is_true: test_1.mappings.type_2 + - is_false: test_2 + + +--- +"Get /{index}/_mapping/_all": + + - skip: + version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node + reason: multiple types are not supported on 6.x indices onwards + + - do: + indices.get_mapping: + index: test_1 + type: _all + + - is_true: test_1.mappings.type_1 + - is_true: test_1.mappings.type_2 + - is_false: test_2 + +--- +"Get /{index}/_mapping/*": + + - skip: + version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node + reason: multiple types are not supported on 6.x indices onwards + + - do: + indices.get_mapping: + index: test_1 + type: '*' + + - is_true: test_1.mappings.type_1 + - is_true: test_1.mappings.type_2 + - is_false: test_2 + +--- +"Get /{index}/_mapping/{type}": + + - skip: + version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node + reason: multiple types are not supported on 6.x indices onwards + + - do: + indices.get_mapping: + index: test_1 + type: type_1 + + - is_false: test_1.mappings.type_2 + - is_false: test_2 + +--- +"Get /{index}/_mapping/{type,type}": + + - skip: + version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node + reason: multiple types are not supported on 6.x indices onwards + + - do: + indices.get_mapping: + index: test_1 + type: type_1,type_2 + + - is_true: test_1.mappings.type_1 + - is_true: test_1.mappings.type_2 + - is_false: test_2 + +--- +"Get /{index}/_mapping/{type*}": + + - skip: + version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node + reason: multiple types are not supported on 6.x indices onwards + + - do: + indices.get_mapping: + index: test_1 + type: '*2' + + - is_true: test_1.mappings.type_2 + - is_false: test_1.mappings.type_1 + - is_false: test_2 + +--- +"Get /_mapping/{type}": + + - skip: + version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node + reason: multiple types are not supported on 6.x indices onwards + + - do: + indices.get_mapping: + type: type_2 + + - is_true: test_1.mappings.type_2 + - is_true: test_2.mappings.type_2 + - is_false: test_1.mappings.type_1 + - is_false: test_2.mappings.type_3 + +--- +"Get /_all/_mapping/{type}": + + - skip: + version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node + reason: multiple types are not supported on 6.x indices onwards + + - do: + indices.get_mapping: + index: _all + type: type_2 + + - is_true: test_1.mappings.type_2 + - is_true: test_2.mappings.type_2 + - is_false: test_1.mappings.type_1 + - is_false: test_2.mappings.type_3 + +--- +"Get /*/_mapping/{type}": + + - skip: + version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node + reason: multiple types are not supported on 6.x indices onwards + + - do: + indices.get_mapping: + index: '*' + type: type_2 + + - is_true: test_1.mappings.type_2 + - is_true: test_2.mappings.type_2 + - is_false: test_1.mappings.type_1 + 
- is_false: test_2.mappings.type_3 + +--- +"Get /index,index/_mapping/{type}": + + - skip: + version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node + reason: multiple types are not supported on 6.x indices onwards + + - do: + indices.get_mapping: + index: test_1,test_2 + type: type_2 + + - is_true: test_1.mappings.type_2 + - is_true: test_2.mappings.type_2 + - is_false: test_2.mappings.type_3 + +--- +"Get /index*/_mapping/{type}": + + - skip: + version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node + reason: multiple types are not supported on 6.x indices onwards + + - do: + indices.get_mapping: + index: '*2' + type: type_2 + + - is_true: test_2.mappings.type_2 + - is_false: test_1 + - is_false: test_2.mappings.type_3 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml new file mode 100644 index 00000000000..df0125187d6 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml @@ -0,0 +1,62 @@ +--- +setup: + - do: + indices.create: + index: test + +--- +"Translog retention": + - skip: + version: " - 5.99.0" + reason: translog retention was added in 6.0.0 + - do: + indices.stats: + metric: [ translog ] + - set: { indices.test.primaries.translog.size_in_bytes: empty_size } + + - do: + index: + index: test + type: bar + id: 1 + body: { "foo": "bar" } + + - do: + indices.stats: + metric: [ translog ] + - gt: { indices.test.primaries.translog.size_in_bytes: $empty_size } + - match: { indices.test.primaries.translog.operations: 1 } + - gt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $empty_size } + - match: { indices.test.primaries.translog.uncommitted_operations: 1 } + + - do: + indices.flush: + index: test + + - do: + indices.stats: + metric: [ translog ] + - gt: { indices.test.primaries.translog.size_in_bytes: $empty_size } + - match: { indices.test.primaries.translog.operations: 1 } + - match: { indices.test.primaries.translog.uncommitted_size_in_bytes: $empty_size } + - match: { indices.test.primaries.translog.uncommitted_operations: 0 } + + - do: + indices.put_settings: + index: test + body: + index.translog.retention.size: -1 + index.translog.retention.age: -1 + + - do: + indices.flush: + index: test + force: true # force flush as we don't have pending ops + + - do: + indices.stats: + metric: [ translog ] + - match: { indices.test.primaries.translog.size_in_bytes: $empty_size } + - match: { indices.test.primaries.translog.operations: 0 } + - match: { indices.test.primaries.translog.uncommitted_size_in_bytes: $empty_size } + - match: { indices.test.primaries.translog.uncommitted_operations: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/15_ids.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/15_ids.yml index 7827965ec69..1dd851554b3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/15_ids.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/15_ids.yml @@ -2,14 +2,11 @@ "IDs": - skip: version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards + reason: multiple types are not supported on 6.x indices onwards - do: indices.create: index: test_1 - body: - settings: - mapping.single_type: false - do: index: diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/20_fvh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/20_fvh.yml new file mode 100644 index 00000000000..d4cb980a05c --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/20_fvh.yml @@ -0,0 +1,49 @@ +setup: + - do: + indices.create: + index: test + body: + mappings: + doc: + "properties": + "title": + "type": "text" + "term_vector": "with_positions_offsets" + "description": + "type": "text" + "term_vector": "with_positions_offsets" + - do: + index: + index: test + type: doc + id: 1 + body: + "title" : "The quick brown fox is brown" + "description" : "The quick pink panther is pink" + - do: + indices.refresh: {} + +--- +"Highlight query": + - skip: + version: " - 5.5.99" + reason: bug fixed in 5.6 + - do: + search: + body: + highlight: + type: fvh + fields: + description: + type: fvh + highlight_query: + prefix: + description: br + title: + type: fvh + highlight_query: + prefix: + title: br + + - match: {hits.hits.0.highlight.title.0: "The quick brown fox is brown"} + - is_false: hits.hits.0.highlight.description diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml index 3c2eba90e6f..e90fda9fe0d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml @@ -4,8 +4,6 @@ setup: indices.create: index: test body: - settings: - mapping.single_type: false mappings: type_1: properties: @@ -16,7 +14,7 @@ setup: "Nested inner hits": - skip: version: "5.99.99 - "# this will only run in a mixed cluster environment with at least 1 5.x node - reason: mapping.single_type can not be changed on 6.x indices onwards + reason: multiple types are not supported on 6.x indices onwards - do: index: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yml index 515662355da..70008415122 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yml @@ -32,6 +32,7 @@ setup: snapshot: test_snapshot - is_true: snapshots + - is_true: snapshots.0.failures - do: snapshot.delete: @@ -87,6 +88,8 @@ setup: - is_true: snapshots - match: { snapshots.0.snapshot: test_snapshot } - match: { snapshots.0.state: SUCCESS } + - is_false: snapshots.0.failures + - is_false: snapshots.0.shards - is_false: snapshots.0.version - do: diff --git a/test/fixtures/example-fixture/build.gradle b/test/fixtures/example-fixture/build.gradle index 17a4586a54d..225a2cf9deb 100644 --- a/test/fixtures/example-fixture/build.gradle +++ b/test/fixtures/example-fixture/build.gradle @@ -19,3 +19,6 @@ apply plugin: 'elasticsearch.build' test.enabled = false +// Not published so no need to assemble +tasks.remove(assemble) +build.dependsOn.remove('assemble') diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java index 7b575d0b7a6..031f6b247c2 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java @@ -50,11 +50,9 @@ abstract class ESElasticsearchCliTestCase extends ESTestCase { final AtomicBoolean init = new AtomicBoolean(); final int status = Elasticsearch.main(args, new Elasticsearch() { @Override - protected Environment createEnv(Terminal terminal, Map settings) { - Settings realSettings = Settings.builder() - .put("path.home", home) - .put(settings).build(); - return new Environment(realSettings); + protected Environment createEnv(Terminal terminal, Map settings, Path configPath) { + final Settings realSettings = Settings.builder().put("path.home", home).put(settings).build(); + return new Environment(realSettings, configPath); } @Override void init(final boolean daemonize, final Path pidFile, final boolean quiet, Environment initialEnv) { diff --git a/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java b/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java index 21cd1961d7c..22496642cb9 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java +++ b/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java @@ -26,6 +26,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; /** * A mock implementation of secure settings for tests to use. @@ -35,6 +36,7 @@ public class MockSecureSettings implements SecureSettings { private Map secureStrings = new HashMap<>(); private Map files = new HashMap<>(); private Set settingNames = new HashSet<>(); + private final AtomicBoolean closed = new AtomicBoolean(false); @Override public boolean isLoaded() { @@ -48,24 +50,48 @@ public class MockSecureSettings implements SecureSettings { @Override public SecureString getString(String setting) { + ensureOpen(); return secureStrings.get(setting); } @Override public InputStream getFile(String setting) { + ensureOpen(); return new ByteArrayInputStream(files.get(setting)); } public void setString(String setting, String value) { + ensureOpen(); secureStrings.put(setting, new SecureString(value.toCharArray())); settingNames.add(setting); } public void setFile(String setting, byte[] value) { + ensureOpen(); files.put(setting, value); settingNames.add(setting); } + /** Merge the given secure settings into this one. 
*/ + public void merge(MockSecureSettings secureSettings) { + for (String setting : secureSettings.getSettingNames()) { + if (settingNames.contains(setting)) { + throw new IllegalArgumentException("Cannot overwrite existing secure setting " + setting); + } + } + settingNames.addAll(secureSettings.settingNames); + secureStrings.putAll(secureSettings.secureStrings); + files.putAll(secureSettings.files); + } + @Override - public void close() throws IOException {} + public void close() throws IOException { + closed.set(true); + } + + private void ensureOpen() { + if (closed.get()) { + throw new IllegalStateException("secure settings are already closed"); + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java b/test/framework/src/main/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java index d75a894d073..5b99aed66b4 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java @@ -35,9 +35,8 @@ import java.util.Arrays; public class AnalysisTestsHelper { - public static ESTestCase.TestAnalysis createTestAnalysisFromClassPath(Path baseDir, - String resource) throws IOException { - Settings settings = Settings.builder() + public static ESTestCase.TestAnalysis createTestAnalysisFromClassPath(final Path baseDir, final String resource) throws IOException { + final Settings settings = Settings.builder() .loadFromStream(resource, AnalysisTestsHelper.class.getResourceAsStream(resource)) .put(Environment.PATH_HOME_SETTING.getKey(), baseDir.toString()) .build(); @@ -46,18 +45,27 @@ public class AnalysisTestsHelper { } public static ESTestCase.TestAnalysis createTestAnalysisFromSettings( - Settings settings, AnalysisPlugin... plugins) throws IOException { - if (settings.get(IndexMetaData.SETTING_VERSION_CREATED) == null) { - settings = Settings.builder().put(settings) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); - } - IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); - AnalysisRegistry analysisRegistry = - new AnalysisModule(new Environment(settings), Arrays.asList(plugins)) - .getAnalysisRegistry(); - return new ESTestCase.TestAnalysis(analysisRegistry.build(indexSettings), - analysisRegistry.buildTokenFilterFactories(indexSettings), - analysisRegistry.buildTokenizerFactories(indexSettings), - analysisRegistry.buildCharFilterFactories(indexSettings)); + final Settings settings, final AnalysisPlugin... plugins) throws IOException { + return createTestAnalysisFromSettings(settings, null, plugins); } + + public static ESTestCase.TestAnalysis createTestAnalysisFromSettings( + final Settings settings, + final Path configPath, + final AnalysisPlugin... 
plugins) throws IOException { + final Settings actualSettings; + if (settings.get(IndexMetaData.SETTING_VERSION_CREATED) == null) { + actualSettings = Settings.builder().put(settings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + } else { + actualSettings = settings; + } + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", actualSettings); + final AnalysisRegistry analysisRegistry = + new AnalysisModule(new Environment(actualSettings, configPath), Arrays.asList(plugins)).getAnalysisRegistry(); + return new ESTestCase.TestAnalysis(analysisRegistry.build(indexSettings), + analysisRegistry.buildTokenFilterFactories(indexSettings), + analysisRegistry.buildTokenizerFactories(indexSettings), + analysisRegistry.buildCharFilterFactories(indexSettings)); + } + } diff --git a/core/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java b/test/framework/src/main/java/org/elasticsearch/index/analysis/MyFilterTokenFilterFactory.java similarity index 96% rename from core/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java rename to test/framework/src/main/java/org/elasticsearch/index/analysis/MyFilterTokenFilterFactory.java index 1c9a4798139..921a09e98e6 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java +++ b/test/framework/src/main/java/org/elasticsearch/index/analysis/MyFilterTokenFilterFactory.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.index.analysis.filter1; +package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.StopFilter; import org.apache.lucene.analysis.TokenStream; diff --git a/test/framework/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java index b4e01b18a56..079c784342b 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java @@ -37,7 +37,7 @@ public abstract class AbstractAsyncBulkByScrollActionTestCase< @Before public void setupForTest() { threadPool = new TestThreadPool(getTestName()); - task = new WorkingBulkByScrollTask(1, "test", "test", "test", TaskId.EMPTY_TASK_ID, null, 0); + task = new WorkingBulkByScrollTask(1, "test", "test", "test", TaskId.EMPTY_TASK_ID, null, Float.MAX_VALUE); } @After diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 4600c80b7a8..e9e58ef5127 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.index.IndexRequest; @@ -54,6 +55,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache; import 
org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.similarity.SimilarityService; @@ -81,6 +83,7 @@ import java.util.HashSet; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; +import java.util.function.Consumer; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.hasSize; @@ -352,7 +355,11 @@ public abstract class IndexShardTestCase extends ESTestCase { getFakeDiscoNode(primary.routingEntry().currentNodeId()), null)); primary.recoverFromStore(); - primary.updateRoutingEntry(ShardRoutingHelper.moveToStarted(primary.routingEntry())); + updateRoutingEntry(primary, ShardRoutingHelper.moveToStarted(primary.routingEntry())); + } + + public static void updateRoutingEntry(IndexShard shard, ShardRouting shardRouting) throws IOException { + shard.updateShardState(shardRouting, shard.getPrimaryTerm(), null, 0L, Collections.emptySet(), Collections.emptySet()); } protected void recoveryEmptyReplica(IndexShard replica) throws IOException { @@ -421,7 +428,7 @@ public abstract class IndexShardTestCase extends ESTestCase { Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), pNode.getName()).build()); recovery.recoverToTarget(); recoveryTarget.markAsDone(); - replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry())); + updateRoutingEntry(replica, ShardRoutingHelper.moveToStarted(replica.routingEntry())); } private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException { @@ -467,44 +474,49 @@ public abstract class IndexShardTestCase extends ESTestCase { } - protected Engine.Index indexDoc(IndexShard shard, String type, String id) throws IOException { + protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id) throws IOException { return indexDoc(shard, type, id, "{}"); } - protected Engine.Index indexDoc(IndexShard shard, String type, String id, String source) throws IOException { + protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source) throws IOException { return indexDoc(shard, type, id, source, XContentType.JSON); } - protected Engine.Index indexDoc(IndexShard shard, String type, String id, String source, XContentType xContentType) throws IOException { - final Engine.Index index; + protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source, XContentType xContentType) + throws IOException { + SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), type, id, new BytesArray(source), xContentType); if (shard.routingEntry().primary()) { - index = shard.prepareIndexOnPrimary( - SourceToParse.source(shard.shardId().getIndexName(), type, id, new BytesArray(source), - xContentType), - Versions.MATCH_ANY, - VersionType.INTERNAL, - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, - false); + return shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse, + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(shard, type)); } else { - index = shard.prepareIndexOnReplica( - SourceToParse.source(shard.shardId().getIndexName(), type, id, new BytesArray(source), - xContentType), - shard.seqNoStats().getMaxSeqNo() 
+ 1, shard.getPrimaryTerm(), 0, - VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); + return shard.applyIndexOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, shard.getPrimaryTerm(), 0, + VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse, getMappingUpdater(shard, type)); } - shard.index(index); - return index; } - protected Engine.Delete deleteDoc(IndexShard shard, String type, String id) throws IOException { - final Engine.Delete delete; + protected Consumer getMappingUpdater(IndexShard shard, String type) { + return update -> { + try { + updateMappings(shard, IndexMetaData.builder(shard.indexSettings().getIndexMetaData()) + .putMapping(type, update.toString()).build()); + } catch (IOException e) { + ExceptionsHelper.reThrowIfNotNull(e); + } + }; + } + + protected void updateMappings(IndexShard shard, IndexMetaData indexMetadata) { + shard.indexSettings().updateIndexMetaData(indexMetadata); + shard.mapperService().merge(indexMetadata, MapperService.MergeReason.MAPPING_UPDATE, true); + } + + protected Engine.DeleteResult deleteDoc(IndexShard shard, String type, String id) throws IOException { if (shard.routingEntry().primary()) { - delete = shard.prepareDeleteOnPrimary(type, id, Versions.MATCH_ANY, VersionType.INTERNAL); + return shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, type, id, VersionType.INTERNAL, update -> {}); } else { - delete = shard.prepareDeleteOnPrimary(type, id, 1, VersionType.EXTERNAL); + return shard.applyDeleteOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, shard.getPrimaryTerm(), + 0L, type, id, VersionType.EXTERNAL, update -> {}); } - shard.delete(delete); - return delete; } protected void flushShard(IndexShard shard) { diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index a3fe52d005c..97035623a6c 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -22,7 +22,6 @@ package org.elasticsearch.indices.analysis; import org.apache.lucene.analysis.util.CharFilterFactory; import org.apache.lucene.analysis.util.TokenFilterFactory; import org.apache.lucene.analysis.util.TokenizerFactory; -import org.elasticsearch.Version; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.index.analysis.ApostropheFilterFactory; import org.elasticsearch.index.analysis.ArabicNormalizationFilterFactory; @@ -36,27 +35,20 @@ import org.elasticsearch.index.analysis.CommonGramsTokenFilterFactory; import org.elasticsearch.index.analysis.CzechStemTokenFilterFactory; import org.elasticsearch.index.analysis.DecimalDigitFilterFactory; import org.elasticsearch.index.analysis.DelimitedPayloadTokenFilterFactory; -import org.elasticsearch.index.analysis.EdgeNGramTokenFilterFactory; import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory; -import org.elasticsearch.index.analysis.ElisionTokenFilterFactory; -import org.elasticsearch.index.analysis.FlattenGraphTokenFilterFactory; import org.elasticsearch.index.analysis.GermanNormalizationFilterFactory; import org.elasticsearch.index.analysis.GermanStemTokenFilterFactory; import org.elasticsearch.index.analysis.HindiNormalizationFilterFactory; import org.elasticsearch.index.analysis.HunspellTokenFilterFactory; import 
org.elasticsearch.index.analysis.IndicNormalizationFilterFactory; -import org.elasticsearch.index.analysis.KStemTokenFilterFactory; import org.elasticsearch.index.analysis.KeepTypesFilterFactory; import org.elasticsearch.index.analysis.KeepWordFilterFactory; import org.elasticsearch.index.analysis.KeywordTokenizerFactory; -import org.elasticsearch.index.analysis.LengthTokenFilterFactory; import org.elasticsearch.index.analysis.LetterTokenizerFactory; import org.elasticsearch.index.analysis.LimitTokenCountFilterFactory; -import org.elasticsearch.index.analysis.LowerCaseTokenFilterFactory; import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory; import org.elasticsearch.index.analysis.MinHashTokenFilterFactory; import org.elasticsearch.index.analysis.MultiTermAwareComponent; -import org.elasticsearch.index.analysis.NGramTokenFilterFactory; import org.elasticsearch.index.analysis.NGramTokenizerFactory; import org.elasticsearch.index.analysis.PathHierarchyTokenizerFactory; import org.elasticsearch.index.analysis.PatternCaptureGroupTokenFilterFactory; @@ -66,7 +58,6 @@ import org.elasticsearch.index.analysis.PersianNormalizationFilterFactory; import org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenizer; -import org.elasticsearch.index.analysis.ReverseTokenFilterFactory; import org.elasticsearch.index.analysis.ScandinavianFoldingFilterFactory; import org.elasticsearch.index.analysis.ScandinavianNormalizationFilterFactory; import org.elasticsearch.index.analysis.SerbianNormalizationFilterFactory; @@ -74,23 +65,16 @@ import org.elasticsearch.index.analysis.ShingleTokenFilterFactory; import org.elasticsearch.index.analysis.SoraniNormalizationFilterFactory; import org.elasticsearch.index.analysis.StandardTokenFilterFactory; import org.elasticsearch.index.analysis.StandardTokenizerFactory; -import org.elasticsearch.index.analysis.StemmerOverrideTokenFilterFactory; -import org.elasticsearch.index.analysis.StemmerTokenFilterFactory; import org.elasticsearch.index.analysis.StopTokenFilterFactory; import org.elasticsearch.index.analysis.SynonymGraphTokenFilterFactory; import org.elasticsearch.index.analysis.SynonymTokenFilterFactory; import org.elasticsearch.index.analysis.ThaiTokenizerFactory; -import org.elasticsearch.index.analysis.TruncateTokenFilterFactory; import org.elasticsearch.index.analysis.UAX29URLEmailTokenizerFactory; -import org.elasticsearch.index.analysis.UpperCaseTokenFilterFactory; import org.elasticsearch.index.analysis.WhitespaceTokenizerFactory; -import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory; -import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory; import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.test.ESTestCase; import java.util.Collection; -import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.Locale; @@ -155,7 +139,7 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase { .put("arabicstem", ArabicStemTokenFilterFactory.class) .put("asciifolding", MovedToAnalysisCommon.class) .put("brazilianstem", BrazilianStemTokenFilterFactory.class) - .put("bulgarianstem", StemmerTokenFilterFactory.class) + .put("bulgarianstem", MovedToAnalysisCommon.class) .put("cjkbigram", CJKBigramFilterFactory.class) .put("cjkwidth", CJKWidthFilterFactory.class) .put("classic", ClassicFilterFactory.class) @@ 
-164,50 +148,50 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase { .put("czechstem", CzechStemTokenFilterFactory.class) .put("decimaldigit", DecimalDigitFilterFactory.class) .put("delimitedpayload", DelimitedPayloadTokenFilterFactory.class) - .put("dictionarycompoundword", DictionaryCompoundWordTokenFilterFactory.class) - .put("edgengram", EdgeNGramTokenFilterFactory.class) - .put("elision", ElisionTokenFilterFactory.class) - .put("englishminimalstem", StemmerTokenFilterFactory.class) - .put("englishpossessive", StemmerTokenFilterFactory.class) - .put("finnishlightstem", StemmerTokenFilterFactory.class) - .put("frenchlightstem", StemmerTokenFilterFactory.class) - .put("frenchminimalstem", StemmerTokenFilterFactory.class) - .put("galicianminimalstem", StemmerTokenFilterFactory.class) - .put("galicianstem", StemmerTokenFilterFactory.class) + .put("dictionarycompoundword", MovedToAnalysisCommon.class) + .put("edgengram", MovedToAnalysisCommon.class) + .put("elision", MovedToAnalysisCommon.class) + .put("englishminimalstem", MovedToAnalysisCommon.class) + .put("englishpossessive", MovedToAnalysisCommon.class) + .put("finnishlightstem", MovedToAnalysisCommon.class) + .put("frenchlightstem", MovedToAnalysisCommon.class) + .put("frenchminimalstem", MovedToAnalysisCommon.class) + .put("galicianminimalstem", MovedToAnalysisCommon.class) + .put("galicianstem", MovedToAnalysisCommon.class) .put("germanstem", GermanStemTokenFilterFactory.class) - .put("germanlightstem", StemmerTokenFilterFactory.class) - .put("germanminimalstem", StemmerTokenFilterFactory.class) + .put("germanlightstem", MovedToAnalysisCommon.class) + .put("germanminimalstem", MovedToAnalysisCommon.class) .put("germannormalization", GermanNormalizationFilterFactory.class) - .put("greeklowercase", LowerCaseTokenFilterFactory.class) - .put("greekstem", StemmerTokenFilterFactory.class) + .put("greeklowercase", MovedToAnalysisCommon.class) + .put("greekstem", MovedToAnalysisCommon.class) .put("hindinormalization", HindiNormalizationFilterFactory.class) - .put("hindistem", StemmerTokenFilterFactory.class) - .put("hungarianlightstem", StemmerTokenFilterFactory.class) + .put("hindistem", MovedToAnalysisCommon.class) + .put("hungarianlightstem", MovedToAnalysisCommon.class) .put("hunspellstem", HunspellTokenFilterFactory.class) - .put("hyphenationcompoundword", HyphenationCompoundWordTokenFilterFactory.class) + .put("hyphenationcompoundword", MovedToAnalysisCommon.class) .put("indicnormalization", IndicNormalizationFilterFactory.class) - .put("irishlowercase", LowerCaseTokenFilterFactory.class) - .put("indonesianstem", StemmerTokenFilterFactory.class) - .put("italianlightstem", StemmerTokenFilterFactory.class) + .put("irishlowercase", MovedToAnalysisCommon.class) + .put("indonesianstem", MovedToAnalysisCommon.class) + .put("italianlightstem", MovedToAnalysisCommon.class) .put("keepword", KeepWordFilterFactory.class) .put("keywordmarker", MovedToAnalysisCommon.class) - .put("kstem", KStemTokenFilterFactory.class) - .put("latvianstem", StemmerTokenFilterFactory.class) - .put("length", LengthTokenFilterFactory.class) + .put("kstem", MovedToAnalysisCommon.class) + .put("latvianstem", MovedToAnalysisCommon.class) + .put("length", MovedToAnalysisCommon.class) .put("limittokencount", LimitTokenCountFilterFactory.class) - .put("lowercase", LowerCaseTokenFilterFactory.class) - .put("ngram", NGramTokenFilterFactory.class) - .put("norwegianlightstem", StemmerTokenFilterFactory.class) - .put("norwegianminimalstem", 
StemmerTokenFilterFactory.class) + .put("lowercase", MovedToAnalysisCommon.class) + .put("ngram", MovedToAnalysisCommon.class) + .put("norwegianlightstem", MovedToAnalysisCommon.class) + .put("norwegianminimalstem", MovedToAnalysisCommon.class) .put("patterncapturegroup", PatternCaptureGroupTokenFilterFactory.class) .put("patternreplace", PatternReplaceTokenFilterFactory.class) .put("persiannormalization", PersianNormalizationFilterFactory.class) .put("porterstem", MovedToAnalysisCommon.class) - .put("portuguesestem", StemmerTokenFilterFactory.class) - .put("portugueselightstem", StemmerTokenFilterFactory.class) - .put("portugueseminimalstem", StemmerTokenFilterFactory.class) - .put("reversestring", ReverseTokenFilterFactory.class) - .put("russianlightstem", StemmerTokenFilterFactory.class) + .put("portuguesestem", MovedToAnalysisCommon.class) + .put("portugueselightstem", MovedToAnalysisCommon.class) + .put("portugueseminimalstem", MovedToAnalysisCommon.class) + .put("reversestring", MovedToAnalysisCommon.class) + .put("russianlightstem", MovedToAnalysisCommon.class) .put("scandinavianfolding", ScandinavianFoldingFilterFactory.class) .put("scandinaviannormalization", ScandinavianNormalizationFilterFactory.class) .put("serbiannormalization", SerbianNormalizationFilterFactory.class) @@ -215,22 +199,22 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase { .put("minhash", MinHashTokenFilterFactory.class) .put("snowballporter", MovedToAnalysisCommon.class) .put("soraninormalization", SoraniNormalizationFilterFactory.class) - .put("soranistem", StemmerTokenFilterFactory.class) - .put("spanishlightstem", StemmerTokenFilterFactory.class) + .put("soranistem", MovedToAnalysisCommon.class) + .put("spanishlightstem", MovedToAnalysisCommon.class) .put("standard", StandardTokenFilterFactory.class) - .put("stemmeroverride", StemmerOverrideTokenFilterFactory.class) + .put("stemmeroverride", MovedToAnalysisCommon.class) .put("stop", StopTokenFilterFactory.class) - .put("swedishlightstem", StemmerTokenFilterFactory.class) + .put("swedishlightstem", MovedToAnalysisCommon.class) .put("synonym", SynonymTokenFilterFactory.class) .put("synonymgraph", SynonymGraphTokenFilterFactory.class) .put("trim", MovedToAnalysisCommon.class) - .put("truncate", TruncateTokenFilterFactory.class) - .put("turkishlowercase", LowerCaseTokenFilterFactory.class) + .put("truncate", MovedToAnalysisCommon.class) + .put("turkishlowercase", MovedToAnalysisCommon.class) .put("type", KeepTypesFilterFactory.class) - .put("uppercase", UpperCaseTokenFilterFactory.class) + .put("uppercase", MovedToAnalysisCommon.class) .put("worddelimiter", MovedToAnalysisCommon.class) .put("worddelimitergraph", MovedToAnalysisCommon.class) - .put("flattengraph", FlattenGraphTokenFilterFactory.class) + .put("flattengraph", MovedToAnalysisCommon.class) // TODO: these tokenfilters are not yet exposed: useful? 
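Most of this hunk swaps concrete factory classes for the MovedToAnalysisCommon marker, recording that those token filters are now built and tested in the analysis-common module instead of core. A minimal sketch of that sentinel pattern, with an illustrative lookup helper that is not part of the patch:

import java.util.Map;

// Marker only, never instantiated; mirrors the class the map entries above point to.
final class MovedToAnalysisCommon {
    private MovedToAnalysisCommon() {}
}

final class KnownTokenFilters {
    // Illustrative helper: entries mapped to the marker are "covered elsewhere", entries mapped
    // to Void are deliberately unexposed, and anything else is expected to have a core factory.
    static boolean expectsCoreFactory(Map<String, Class<?>> known, String filterName) {
        Class<?> clazz = known.get(filterName);
        return clazz != null && clazz != MovedToAnalysisCommon.class && clazz != Void.class;
    }
}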
@@ -262,6 +246,9 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase { .put("daterecognizer", Void.class) // for token filters that generate bad offsets, which are now rejected since Lucene 7 .put("fixbrokenoffsets", Void.class) + // should we expose it, or maybe think about higher level integration of the + // fake term frequency feature (LUCENE-7854) + .put("delimitedtermfrequency", Void.class) .immutableMap(); diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index 8774ba5836b..61958385017 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -37,13 +37,16 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportService; +import java.nio.file.Path; import java.util.Collection; +import java.util.Collections; import java.util.function.Function; /** @@ -57,7 +60,11 @@ public class MockNode extends Node { private final Collection> classpathPlugins; public MockNode(Settings settings, Collection> classpathPlugins) { - super(InternalSettingsPreparer.prepareEnvironment(settings, null), classpathPlugins); + this(settings, classpathPlugins, null); + } + + public MockNode(Settings settings, Collection> classpathPlugins, Path configPath) { + super(InternalSettingsPreparer.prepareEnvironment(settings, null, Collections.emptyMap(), configPath), classpathPlugins); this.classpathPlugins = classpathPlugins; } @@ -104,8 +111,8 @@ public class MockNode extends Node { } @Override - protected Node newTribeClientNode(Settings settings, Collection> classpathPlugins) { - return new MockNode(settings, classpathPlugins); + protected Node newTribeClientNode(Settings settings, Collection> classpathPlugins, Path configPath) { + return new MockNode(settings, classpathPlugins, configPath); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index 2ca6f2aa0c7..407b20ef778 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -219,7 +219,7 @@ public class MockScriptEngine implements ScriptEngine { } @Override - public boolean needsScores() { + public boolean needs_score() { return true; } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java index e27199918f4..432b05d6b54 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java @@ -85,7 +85,7 @@ public abstract class BaseAggregationTestCase entries = new ArrayList<>(); entries.addAll(indicesModule.getNamedWriteables()); diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index f2b166fcd6a..6f0bfd83265 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -1007,7 +1007,8 @@ public abstract class AbstractQueryTestCase> ServiceHolder(Settings nodeSettings, Settings indexSettings, Collection> plugins, AbstractQueryTestCase testCase) throws IOException { Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings, null); - PluginsService pluginsService = new PluginsService(nodeSettings, env.modulesFile(), env.pluginsFile(), plugins); + PluginsService pluginsService; + pluginsService = new PluginsService(nodeSettings, null, env.modulesFile(), env.pluginsFile(), plugins); client = (Client) Proxy.newProxyInstance( Client.class.getClassLoader(), diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 6bc9d89558d..357658d1575 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1731,6 +1731,10 @@ public abstract class ESIntegTestCase extends ESTestCase { return builder.build(); } + protected Path nodeConfigPath(int nodeOrdinal) { + return null; + } + /** * Returns a collection of plugins that should be loaded on each node. */ @@ -1839,6 +1843,11 @@ public abstract class ESIntegTestCase extends ESTestCase { put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build(); } + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return ESIntegTestCase.this.nodeConfigPath(nodeOrdinal); + } + @Override public Collection> nodePlugins() { return ESIntegTestCase.this.nodePlugins(); @@ -2153,10 +2162,6 @@ public abstract class ESIntegTestCase extends ESTestCase { .put(settings) .put(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath()); - Path configDir = indexDir.resolve("config"); - if (Files.exists(configDir)) { - builder.put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()); - } return builder.build(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 0d3e8131ab2..bf8888fa28e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -29,7 +29,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; - import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -48,6 +47,7 @@ import org.elasticsearch.bootstrap.BootstrapForTesting; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.PathUtilsForTesting; @@ -633,7 +633,7 @@ public abstract class ESTestCase extends 
LuceneTestCase { private static final String[] TIME_SUFFIXES = new String[]{"d", "h", "ms", "s", "m", "micros", "nanos"}; - public static String randomTimeValue(int lower, int upper, String[] suffixes) { + public static String randomTimeValue(int lower, int upper, String... suffixes) { return randomIntBetween(lower, upper) + randomFrom(suffixes); } @@ -692,14 +692,14 @@ public abstract class ESTestCase extends LuceneTestCase { /** * Runs the code block for 10 seconds waiting for no assertion to trip. */ - public static void assertBusy(Runnable codeBlock) throws Exception { + public static void assertBusy(CheckedRunnable codeBlock) throws Exception { assertBusy(codeBlock, 10, TimeUnit.SECONDS); } /** * Runs the code block for the provided interval, waiting for no assertions to trip. */ - public static void assertBusy(Runnable codeBlock, long maxWaitTime, TimeUnit unit) throws Exception { + public static void assertBusy(CheckedRunnable codeBlock, long maxWaitTime, TimeUnit unit) throws Exception { long maxTimeInMillis = TimeUnit.MILLISECONDS.convert(maxWaitTime, unit); long iterations = Math.max(Math.round(Math.log10(maxTimeInMillis) / Math.log10(2)), 1); long timeInMillis = 1; diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java index c4bd9643657..8341734ccb5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java @@ -21,6 +21,7 @@ package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.annotations.Listeners; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; + import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TimeUnits; @@ -51,10 +52,6 @@ public abstract class ESTokenStreamTestCase extends BaseTokenStreamTestCase { BootstrapForTesting.ensureInitialized(); } - public static Version randomVersion() { - return VersionUtils.randomVersion(random()); - } - public Settings.Builder newAnalysisSettingsBuilder() { return Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java index 90cec479f1f..d7b9b186c9d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java @@ -19,6 +19,7 @@ package org.elasticsearch.test; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -130,12 +131,14 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Predicate; import java.util.function.Supplier; import java.util.stream.Collectors; import static java.util.Collections.emptyList; import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; public abstract class 
InternalAggregationTestCase extends AbstractWireSerializingTestCase { @@ -297,7 +300,13 @@ public abstract class InternalAggregationTestCase public final void testFromXContent() throws IOException { final T aggregation = createTestInstance(); - final Aggregation parsedAggregation = parseAndAssert(aggregation, randomBoolean()); + final Aggregation parsedAggregation = parseAndAssert(aggregation, randomBoolean(), false); + assertFromXContent(aggregation, (ParsedAggregation) parsedAggregation); + } + + public final void testFromXContentWithRandomFields() throws IOException { + final T aggregation = createTestInstance(); + final Aggregation parsedAggregation = parseAndAssert(aggregation, randomBoolean(), true); assertFromXContent(aggregation, (ParsedAggregation) parsedAggregation); } @@ -305,7 +314,7 @@ public abstract class InternalAggregationTestCase @SuppressWarnings("unchecked") protected
<P extends ParsedAggregation>
P parseAndAssert(final InternalAggregation aggregation, - final boolean shuffled) throws IOException { + final boolean shuffled, final boolean addRandomFields) throws IOException { final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); final XContentType xContentType = randomFrom(XContentType.values()); @@ -317,29 +326,57 @@ public abstract class InternalAggregationTestCase } else { originalBytes = toXContent(aggregation, xContentType, params, humanReadable); } + BytesReference mutated; + if (addRandomFields) { + /* + * - we don't add to the root object because it should only contain + * the named aggregation to test - we don't want to insert into the + * "meta" object, because we pass on everything we find there + * + * - we don't want to directly insert anything random into "buckets" + * objects, they are used with "keyed" aggregations and contain + * named bucket objects. Any new named object on this level should + * also be a bucket and be parsed as such. + */ + Predicate basicExcludes = path -> path.isEmpty() || path.endsWith(Aggregation.CommonFields.META.getPreferredName()) + || path.endsWith(Aggregation.CommonFields.BUCKETS.getPreferredName()); + Predicate excludes = basicExcludes.or(excludePathsFromXContentInsertion()); + mutated = insertRandomFields(xContentType, originalBytes, excludes, random()); + } else { + mutated = originalBytes; + } - Aggregation parsedAggregation; - try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + SetOnce parsedAggregation = new SetOnce<>(); + try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); - - parsedAggregation = XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class); + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class, parsedAggregation::set); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); assertNull(parser.nextToken()); - assertEquals(aggregation.getName(), parsedAggregation.getName()); - assertEquals(aggregation.getMetaData(), parsedAggregation.getMetaData()); + Aggregation agg = parsedAggregation.get(); + assertEquals(aggregation.getName(), agg.getName()); + assertEquals(aggregation.getMetaData(), agg.getMetaData()); - assertTrue(parsedAggregation instanceof ParsedAggregation); - assertEquals(aggregation.getType(), parsedAggregation.getType()); + assertTrue(agg instanceof ParsedAggregation); + assertEquals(aggregation.getType(), agg.getType()); + + BytesReference parsedBytes = toXContent(agg, xContentType, params, humanReadable); + assertToXContentEquivalent(originalBytes, parsedBytes, xContentType); + + return (P) agg; } - BytesReference parsedBytes = toXContent(parsedAggregation, xContentType, params, humanReadable); - assertToXContentEquivalent(originalBytes, parsedBytes, xContentType); + } - return (P) parsedAggregation; + /** + * Overwrite this in your test if other than the basic xContent paths should be excluded during insertion of random fields + */ + protected Predicate excludePathsFromXContentInsertion() { + return path -> false; } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java 
b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index c4e191e75c5..eeec27db4dd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -53,6 +53,7 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.transport.TransportAddress; @@ -102,6 +103,7 @@ import org.junit.Assert; import java.io.Closeable; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.InetSocketAddress; import java.nio.file.Path; import java.util.ArrayList; @@ -605,7 +607,13 @@ public final class InternalTestCluster extends TestCluster { } else if (!usingSingleNodeDiscovery && finalSettings.get(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()) == null) { throw new IllegalArgumentException(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " must be configured"); } - MockNode node = new MockNode(finalSettings.build(), plugins); + SecureSettings secureSettings = finalSettings.getSecureSettings(); + MockNode node = new MockNode(finalSettings.build(), plugins, nodeConfigurationSource.nodeConfigPath(nodeId)); + try { + IOUtils.close(secureSettings); + } catch (IOException e) { + throw new UncheckedIOException(e); + } return new NodeAndClient(name, node, nodeId); } @@ -2016,12 +2024,9 @@ public final class InternalTestCluster extends TestCluster { // in an assertBusy loop, so it will try for 10 seconds and // fail if it never reached 0 try { - assertBusy(new Runnable() { - @Override - public void run() { - CircuitBreaker reqBreaker = breakerService.getBreaker(CircuitBreaker.REQUEST); - assertThat("Request breaker not reset to 0 on node: " + name, reqBreaker.getUsed(), equalTo(0L)); - } + assertBusy(() -> { + CircuitBreaker reqBreaker = breakerService.getBreaker(CircuitBreaker.REQUEST); + assertThat("Request breaker not reset to 0 on node: " + name, reqBreaker.getUsed(), equalTo(0L)); }); } catch (Exception e) { fail("Exception during check for request breaker reset to 0: " + e); diff --git a/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java index 6d8d36e3d11..60c69bbd6c6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java +++ b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java @@ -21,6 +21,7 @@ package org.elasticsearch.test; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; +import java.nio.file.Path; import java.util.Collection; import java.util.Collections; @@ -32,6 +33,11 @@ public abstract class NodeConfigurationSource { return Settings.EMPTY; } + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + @Override public Settings transportClientSettings() { return Settings.EMPTY; @@ -43,6 +49,8 @@ public abstract class NodeConfigurationSource { */ public abstract Settings nodeSettings(int nodeOrdinal); + public abstract Path nodeConfigPath(int nodeOrdinal); + /** Returns plugins that should be loaded on the node */ public Collection> nodePlugins() 
{ return Collections.emptyList(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java index 81d8fa84a19..4c4fe8f76ad 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java @@ -21,28 +21,14 @@ package org.elasticsearch.test; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.util.TestUtil; import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.segments.IndexSegments; -import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; -import org.elasticsearch.action.admin.indices.segments.ShardSegments; -import org.elasticsearch.action.admin.indices.upgrade.get.IndexUpgradeStatus; -import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; -import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.IndexFolderUpgrader; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.MergePolicyConfig; -import org.elasticsearch.index.engine.Segment; import java.io.IOException; -import java.io.InputStream; import java.nio.file.DirectoryStream; import java.nio.file.FileVisitResult; import java.nio.file.Files; @@ -50,17 +36,14 @@ import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.List; import static junit.framework.TestCase.assertFalse; import static junit.framework.TestCase.assertTrue; import static org.elasticsearch.test.ESTestCase.randomInt; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -86,32 +69,6 @@ public class OldIndexUtils { .build(); } - public static void upgradeIndexFolder(InternalTestCluster cluster, String nodeName) throws Exception { - final NodeEnvironment nodeEnvironment = cluster.getInstance(NodeEnvironment.class, nodeName); - IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnvironment); - } - - public static void loadIndex(String indexName, String indexFile, Path unzipDir, Path bwcPath, Logger logger, Path... 
paths) throws - Exception { - Path unzipDataDir = unzipDir.resolve("data"); - - Path backwardsIndex = bwcPath.resolve(indexFile); - // decompress the index - try (InputStream stream = Files.newInputStream(backwardsIndex)) { - TestUtil.unzip(stream, unzipDir); - } - - // check it is unique - assertTrue(Files.exists(unzipDataDir)); - Path[] list = FileSystemUtils.files(unzipDataDir); - if (list.length != 1) { - throw new IllegalStateException("Backwards index must contain exactly one cluster"); - } - - final Path src = getIndexDir(logger, indexName, indexFile, list[0]); - copyIndex(logger, src, src.getFileName().toString(), paths); - } - public static Path getIndexDir( final Logger logger, final String indexName, @@ -141,24 +98,6 @@ public class OldIndexUtils { } } - public static void assertNotUpgraded(Client client, String... index) throws Exception { - for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { - assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0); - // TODO: it would be better for this to be strictly greater, but sometimes an extra flush - // mysteriously happens after the second round of docs are indexed - assertTrue("index " + status.getIndex() + " should have recovered some segments from transaction log", - status.getTotalBytes() >= status.getToUpgradeBytes()); - assertTrue("index " + status.getIndex() + " should need upgrading", status.getToUpgradeBytes() != 0); - } - } - - @SuppressWarnings("unchecked") - public static Collection getUpgradeStatus(Client client, String... indices) throws Exception { - UpgradeStatusResponse upgradeStatusResponse = client.admin().indices().prepareUpgradeStatus(indices).get(); - assertNoFailures(upgradeStatusResponse); - return upgradeStatusResponse.getIndices().values(); - } - // randomly distribute the files from src over dests paths public static void copyIndex(final Logger logger, final Path src, final String folderName, final Path... dests) throws IOException { Path destinationDataPath = dests[randomInt(dests.length - 1)]; @@ -196,59 +135,4 @@ public class OldIndexUtils { } }); } - - public static void assertUpgraded(Client client, String... 
index) throws Exception { - for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { - assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0); - assertEquals("index " + status.getIndex() + " should be upgraded", - 0, status.getToUpgradeBytes()); - } - - // double check using the segments api that all segments are actually upgraded - IndicesSegmentResponse segsRsp; - if (index == null) { - segsRsp = client.admin().indices().prepareSegments().execute().actionGet(); - } else { - segsRsp = client.admin().indices().prepareSegments(index).execute().actionGet(); - } - for (IndexSegments indexSegments : segsRsp.getIndices().values()) { - for (IndexShardSegments shard : indexSegments) { - for (ShardSegments segs : shard.getShards()) { - for (Segment seg : segs.getSegments()) { - assertEquals("Index " + indexSegments.getIndex() + " has unupgraded segment " + seg.toString(), - Version.CURRENT.luceneVersion.major, seg.version.major); - assertEquals("Index " + indexSegments.getIndex() + " has unupgraded segment " + seg.toString(), - Version.CURRENT.luceneVersion.minor, seg.version.minor); - } - } - } - } - } - - public static boolean isUpgraded(Client client, String index) throws Exception { - Logger logger = Loggers.getLogger(OldIndexUtils.class); - int toUpgrade = 0; - for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { - logger.info("Index: {}, total: {}, toUpgrade: {}", status.getIndex(), status.getTotalBytes(), status.getToUpgradeBytes()); - toUpgrade += status.getToUpgradeBytes(); - } - return toUpgrade == 0; - } - - public static void assertUpgradeWorks(Client client, String indexName, Version version) throws Exception { - if (OldIndexUtils.isLatestLuceneVersion(version) == false) { - OldIndexUtils.assertNotUpgraded(client, indexName); - } - assertNoFailures(client.admin().indices().prepareUpgrade(indexName).get()); - assertUpgraded(client, indexName); - } - - public static Version extractVersion(String index) { - return Version.fromString(index.substring(index.indexOf('-') + 1, index.lastIndexOf('.'))); - } - - public static boolean isLatestLuceneVersion(Version version) { - return version.luceneVersion.major == Version.CURRENT.luceneVersion.major && - version.luceneVersion.minor == Version.CURRENT.luceneVersion.minor; - } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java index 16953b45d19..c8a0ce8fc28 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java @@ -32,13 +32,13 @@ import org.elasticsearch.test.rest.yaml.ObjectPath; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Stack; import java.util.function.Predicate; import java.util.function.Supplier; +import java.util.stream.Collectors; import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiOfLength; import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; @@ -195,22 +195,20 @@ public final class XContentTestUtils { } } - try (XContentParser parser = createParser(NamedXContentRegistry.EMPTY, xContent, contentType)) { - Supplier value = () -> { + Supplier value = () -> { + List randomValues = RandomObjects.randomStoredFieldValues(random, contentType).v1(); + if 
(random.nextBoolean()) { + return randomValues.get(0); + } else { if (random.nextBoolean()) { - return RandomObjects.randomStoredFieldValues(random, contentType); + return randomValues.stream().collect(Collectors.toMap(obj -> randomAsciiOfLength(random, 10), obj -> obj)); } else { - if (random.nextBoolean()) { - return Collections.singletonMap(randomAsciiOfLength(random, 10), randomAsciiOfLength(random, 10)); - } else { - return Collections.singletonList(randomAsciiOfLength(random, 10)); - } + return randomValues; } - }; - return XContentTestUtils - .insertIntoXContent(contentType.xContent(), xContent, insertPaths, () -> randomAsciiOfLength(random, 10), value) - .bytes(); - } + } + }; + return XContentTestUtils + .insertIntoXContent(contentType.xContent(), xContent, insertPaths, () -> randomAsciiOfLength(random, 10), value).bytes(); } /** @@ -251,7 +249,8 @@ public final class XContentTestUtils { List validPaths = new ArrayList<>(); // parser.currentName() can be null for root object and unnamed objects in arrays if (parser.currentName() != null) { - currentPath.push(parser.currentName()); + // dots in randomized field names need to be escaped, we use that character as the path separator + currentPath.push(parser.currentName().replaceAll("\\.", "\\\\.")); } if (parser.currentToken() == XContentParser.Token.START_OBJECT) { validPaths.add(String.join(".", currentPath.toArray(new String[currentPath.size()]))); diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java index f4be0d0d529..e2ff2fbe26f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java @@ -34,6 +34,7 @@ import org.elasticsearch.transport.TransportSettings; import java.io.IOException; import java.net.InetSocketAddress; import java.net.ServerSocket; +import java.nio.file.Path; import java.util.HashSet; import java.util.Set; @@ -57,6 +58,11 @@ public class ClusterDiscoveryConfiguration extends NodeConfigurationSource { return nodeSettings; } + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + @Override public Settings transportClientSettings() { return transportClientSettings; diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java b/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java index 314f1b52852..1d89fc981f0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java @@ -81,12 +81,14 @@ public class ThrowingLeafReaderWrapper extends FilterLeafReader { this.thrower = thrower; } - @Override - public Fields fields() throws IOException { - Fields fields = super.fields(); - thrower.maybeThrow(Flags.Fields); - return fields == null ? null : new ThrowingFields(fields, thrower); + public Terms terms(String field) throws IOException { + Terms terms = super.terms(field); + if (thrower.wrapTerms(field)) { + thrower.maybeThrow(Flags.Terms); + return terms == null ? 
null : new ThrowingTerms(terms, thrower); + } + return terms; } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index ce10c631506..97b58ceda72 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -69,6 +69,7 @@ public abstract class ESRestTestCase extends ESTestCase { public static final String TRUSTSTORE_PATH = "truststore.path"; public static final String TRUSTSTORE_PASSWORD = "truststore.password"; public static final String CLIENT_RETRY_TIMEOUT = "client.retry.timeout"; + public static final String CLIENT_SOCKET_TIMEOUT = "client.socket.timeout"; /** * Convert the entity from a {@link Response} into a map of maps. @@ -180,11 +181,21 @@ public abstract class ESRestTestCase extends ESTestCase { /** * Returns whether to preserve the repositories on completion of this test. + * Defaults to not preserving repos. See also + * {@link #preserveSnapshotsUponCompletion()}. */ protected boolean preserveReposUponCompletion() { return false; } + /** + * Returns whether to preserve the snapshots in repositories on completion of this + * test. Defaults to not preserving snapshots. Only works for {@code fs} repositories. + */ + protected boolean preserveSnapshotsUponCompletion() { + return false; + } + private void wipeCluster() throws IOException { if (preserveIndicesUponCompletion() == false) { // wipe indices @@ -216,7 +227,7 @@ public abstract class ESRestTestCase extends ESTestCase { String repoName = repo.getKey(); Map repoSpec = (Map) repo.getValue(); String repoType = (String) repoSpec.get("type"); - if (repoType.equals("fs")) { + if (false == preserveSnapshotsUponCompletion() && repoType.equals("fs")) { // All other repo types we really don't have a chance of being able to iterate properly, sadly. 
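The new preserveSnapshotsUponCompletion() hook defaults to false, so the wipe above still deletes every snapshot in fs repositories; a test that needs its snapshots to outlive the test overrides it, normally together with preserveReposUponCompletion(). A hypothetical subclass (the class name is made up) showing the pair of overrides:

import org.elasticsearch.test.rest.ESRestTestCase;

public class SnapshotPreservingRestIT extends ESRestTestCase {
    @Override
    protected boolean preserveReposUponCompletion() {
        return true; // keep the repository itself, otherwise the kept snapshots are unreachable
    }

    @Override
    protected boolean preserveSnapshotsUponCompletion() {
        return true; // skip the fs-repository snapshot wipe during cleanup
    }
}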
String url = "_snapshot/" + repoName + "/_all"; Map params = singletonMap("ignore_unavailable", "true"); @@ -346,6 +357,11 @@ public abstract class ESRestTestCase extends ESTestCase { final TimeValue maxRetryTimeout = TimeValue.parseTimeValue(requestTimeoutString, CLIENT_RETRY_TIMEOUT); builder.setMaxRetryTimeoutMillis(Math.toIntExact(maxRetryTimeout.getMillis())); } + final String socketTimeoutString = settings.get(CLIENT_SOCKET_TIMEOUT); + if (socketTimeoutString != null) { + final TimeValue socketTimeout = TimeValue.parseTimeValue(socketTimeoutString, CLIENT_SOCKET_TIMEOUT); + builder.setRequestConfigCallback(conf -> conf.setSocketTimeout(Math.toIntExact(socketTimeout.getMillis()))); + } return builder.build(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java index 281dbb5115f..5d6fe757aa2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java @@ -97,7 +97,7 @@ public class MockFSDirectoryService extends FsDirectoryService { logger.debug("Using MockDirWrapper with seed [{}] throttle: [{}] crashIndex: [{}]", SeedUtils.formatSeed(seed), throttle, crashIndex); } - delegateService = randomDirectorService(indexStore, path); + delegateService = randomDirectoryService(indexStore, path); } @@ -162,9 +162,16 @@ public class MockFSDirectoryService extends FsDirectoryService { return w; } - private FsDirectoryService randomDirectorService(IndexStore indexStore, ShardPath path) { + private FsDirectoryService randomDirectoryService(IndexStore indexStore, ShardPath path) { final IndexSettings indexSettings = indexStore.getIndexSettings(); - final IndexMetaData build = IndexMetaData.builder(indexSettings.getIndexMetaData()).settings(Settings.builder().put(indexSettings.getSettings()).put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), RandomPicks.randomFrom(random, IndexModule.Type.values()).getSettingsKey())).build(); + final IndexMetaData build = IndexMetaData.builder(indexSettings.getIndexMetaData()) + .settings(Settings.builder() + // don't use the settings from indexSettings#getSettings() they are merged with node settings and might contain + // secure settings that should not be copied in here since the new IndexSettings ctor below will barf if we do + .put(indexSettings.getIndexMetaData().getSettings()) + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), + RandomPicks.randomFrom(random, IndexModule.Type.values()).getSettingsKey())) + .build(); final IndexSettings newIndexSettings = new IndexSettings(build, indexSettings.getNodeSettings()); return new FsDirectoryService(newIndexSettings, indexStore, path); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java index 55519ec2af2..2ccddf6bc54 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -40,6 +40,7 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportServiceAdapter; +import org.elasticsearch.transport.TransportStats; import 
java.io.IOException; import java.io.UncheckedIOException; @@ -213,6 +214,11 @@ public class CapturingTransport implements Transport { }; } + @Override + public TransportStats getStats() { + throw new UnsupportedOperationException(); + } + @Override public void transportServiceAdapter(TransportServiceAdapter adapter) { this.adapter = adapter; @@ -250,11 +256,6 @@ public class CapturingTransport implements Transport { } - @Override - public long serverOpen() { - return 0; - } - @Override public Lifecycle.State lifecycleState() { return null; diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 210190940d2..25525de7fbf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -56,6 +56,7 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportServiceAdapter; +import org.elasticsearch.transport.TransportStats; import java.io.IOException; import java.net.UnknownHostException; @@ -217,10 +218,17 @@ public final class MockTransportService extends TransportService { } } + @Override + public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { + throw new ConnectTransportException(node, "DISCONNECT: simulated"); + } + @Override protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException { - simulateDisconnect(connection, original, "DISCONNECT: simulated"); + connection.close(); + // send the request, which will blow up + connection.sendRequest(requestId, action, request, options); } }); } @@ -255,19 +263,12 @@ public final class MockTransportService extends TransportService { addDelegate(transportAddress, new DelegateTransport(original) { - @Override - public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer connectionValidator) - throws ConnectTransportException { - original.connectToNode(node, connectionProfile, connectionValidator); - } - @Override protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException { if (blockedActions.contains(action)) { logger.info("--> preventing {} request", action); - simulateDisconnect(connection, original, "DISCONNECT: prevented " + action + " request"); + connection.close(); } connection.sendRequest(requestId, action, request, options); } @@ -301,6 +302,11 @@ public final class MockTransportService extends TransportService { } } + @Override + public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { + throw new ConnectTransportException(node, "UNRESPONSIVE: simulated"); + } + @Override protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException { @@ -367,6 +373,28 @@ public final class MockTransportService extends TransportService { } } + @Override + public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { + TimeValue delay = getDelay(); + if (delay.millis() <= 0) { + 
return original.openConnection(node, profile); + } + + // TODO: Replace with proper setting + TimeValue connectingTimeout = NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT.getDefault(Settings.EMPTY); + try { + if (delay.millis() < connectingTimeout.millis()) { + Thread.sleep(delay.millis()); + return original.openConnection(node, profile); + } else { + Thread.sleep(connectingTimeout.millis()); + throw new ConnectTransportException(node, "UNRESPONSIVE: simulated"); + } + } catch (InterruptedException e) { + throw new ConnectTransportException(node, "UNRESPONSIVE: simulated"); + } + } + @Override protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException { @@ -448,37 +476,6 @@ public final class MockTransportService extends TransportService { return (LookupTestTransport) transport; } - /** - * simulates a disconnect by disconnecting from the underlying transport and throwing a - * {@link ConnectTransportException} - */ - private void simulateDisconnect(DiscoveryNode node, Transport transport, String reason) { - simulateDisconnect(node, transport, reason, null); - } - - /** - * simulates a disconnect by disconnecting from the underlying transport and throwing a - * {@link ConnectTransportException}, due to a specific cause exception - */ - private void simulateDisconnect(DiscoveryNode node, Transport transport, String reason, @Nullable Throwable e) { - if (transport.nodeConnected(node)) { - // this a connected node, disconnecting from it will be up the exception - transport.disconnectFromNode(node); - } else { - throw new ConnectTransportException(node, reason, e); - } - } - - /** - * simulates a disconnect by closing the connection and throwing a - * {@link ConnectTransportException} - */ - private void simulateDisconnect(Transport.Connection connection, Transport transport, String reason) throws IOException { - connection.close(); - simulateDisconnect(connection.getNode(), transport, reason); - } - - /** * A lookup transport that has a list of potential Transport implementations to delegate to for node operations, * if none is registered, then the default one is used. 
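With the simulateDisconnect helpers removed, the disconnect and unresponsive rules now fail at the transport layer: the delegate's openConnection throws ConnectTransportException and sendRequest closes the connection before forwarding. A minimal sketch, assuming the serviceA/serviceB/nodeA fixtures that AbstractSimpleTransportTestCase sets up, of how a test observes the simulated failure (mirroring the expectThrows calls later in this patch):

public void testSimulatedDisconnectSurfacesAsConnectException() {
    // any of the existing disconnect rules will do; this one fails every new connection up front
    serviceB.addFailToSendNoConnectRule(serviceA);
    expectThrows(ConnectTransportException.class,
            () -> serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE));
}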
@@ -572,11 +569,6 @@ public final class MockTransportService extends TransportService { transport.disconnectFromNode(node); } - @Override - public long serverOpen() { - return transport.serverOpen(); - } - @Override public List getLocalAddresses() { return transport.getLocalAddresses(); @@ -609,6 +601,11 @@ public final class MockTransportService extends TransportService { }; } + @Override + public TransportStats getStats() { + return transport.getStats(); + } + @Override public Lifecycle.State lifecycleState() { return transport.lifecycleState(); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 99704235cc7..7c0070e0f96 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -41,7 +41,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.node.Node; @@ -61,7 +60,6 @@ import java.net.ServerSocket; import java.net.Socket; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -169,8 +167,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { try { assertNoPendingHandshakes(serviceA.getOriginalTransport()); assertNoPendingHandshakes(serviceB.getOriginalTransport()); - assertPendingConnections(0, serviceA.getOriginalTransport()); - assertPendingConnections(0, serviceB.getOriginalTransport()); } finally { IOUtils.close(serviceA, serviceB, () -> { try { @@ -194,12 +190,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } } - public void assertPendingConnections(int numConnections, Transport transport) { - if (transport instanceof TcpTransport) { - TcpTransport tcpTransport = (TcpTransport) transport; - assertEquals(numConnections, tcpTransport.getNumOpenConnections() - tcpTransport.getNumConnectedNodes()); - } - } public void testHelloWorld() { serviceA.registerRequestHandler("sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC, @@ -1471,12 +1461,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { // all is well } - try (Transport.Connection connection = serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE)) { - serviceB.handshake(connection, 100); - fail("exception should be thrown"); - } catch (IllegalStateException e) { - // all is well - } + expectThrows(ConnectTransportException.class, () -> serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE)); } public void testMockUnresponsiveRule() throws IOException { @@ -1527,12 +1512,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { // all is well } - try (Transport.Connection connection = serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE)) { - serviceB.handshake(connection, 100); - fail("exception should be thrown"); - } catch (IllegalStateException e) { - // all is well - } + 
expectThrows(ConnectTransportException.class, () -> serviceB.openConnection(nodeA, MockTcpTransport.LIGHT_PROFILE)); } @@ -2243,13 +2223,200 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceB.sendRequest(connection, "action", new TestRequest("hello world"), TransportRequestOptions.EMPTY, transportResponseHandler); receivedLatch.await(); - assertPendingConnections(1, serviceB.getOriginalTransport()); serviceC.close(); - assertPendingConnections(0, serviceC.getOriginalTransport()); sendResponseLatch.countDown(); responseLatch.await(); } - assertPendingConnections(0, serviceC.getOriginalTransport()); } + public void testTransportStats() throws Exception { + MockTransportService serviceC = build(Settings.builder().put("name", "TS_TEST").build(), version0, null, true); + CountDownLatch receivedLatch = new CountDownLatch(1); + CountDownLatch sendResponseLatch = new CountDownLatch(1); + serviceB.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME, + (request, channel) -> { + // don't block on a network thread here + threadPool.generic().execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (IOException e1) { + throw new UncheckedIOException(e1); + } + } + + @Override + protected void doRun() throws Exception { + receivedLatch.countDown(); + sendResponseLatch.await(); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } + }); + }); + serviceC.start(); + serviceC.acceptIncomingRequests(); + CountDownLatch responseLatch = new CountDownLatch(1); + TransportResponseHandler transportResponseHandler = new TransportResponseHandler() { + @Override + public TransportResponse newInstance() { + return TransportResponse.Empty.INSTANCE; + } + + @Override + public void handleResponse(TransportResponse response) { + responseLatch.countDown(); + } + + @Override + public void handleException(TransportException exp) { + responseLatch.countDown(); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }; + + TransportStats stats = serviceC.transport.getStats(); // nothing transmitted / read yet + assertEquals(0, stats.getRxCount()); + assertEquals(0, stats.getTxCount()); + assertEquals(0, stats.getRxSize().getBytes()); + assertEquals(0, stats.getTxSize().getBytes()); + + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); + builder.addConnections(1, + TransportRequestOptions.Type.BULK, + TransportRequestOptions.Type.PING, + TransportRequestOptions.Type.RECOVERY, + TransportRequestOptions.Type.REG, + TransportRequestOptions.Type.STATE); + try (Transport.Connection connection = serviceC.openConnection(serviceB.getLocalNode(), builder.build())) { + assertBusy(() -> { // netty for instance invokes this concurrently so we better use assert busy here + TransportStats transportStats = serviceC.transport.getStats(); // we did a single round-trip to do the initial handshake + assertEquals(1, transportStats.getRxCount()); + assertEquals(1, transportStats.getTxCount()); + assertEquals(25, transportStats.getRxSize().getBytes()); + assertEquals(45, transportStats.getTxSize().getBytes()); + }); + serviceC.sendRequest(connection, "action", new TestRequest("hello world"), TransportRequestOptions.EMPTY, + transportResponseHandler); + receivedLatch.await(); + assertBusy(() -> { // netty for instance invokes this concurrently so we better use assert busy here + TransportStats transportStats = serviceC.transport.getStats(); // request has ben send + 
assertEquals(1, transportStats.getRxCount()); + assertEquals(2, transportStats.getTxCount()); + assertEquals(25, transportStats.getRxSize().getBytes()); + assertEquals(91, transportStats.getTxSize().getBytes()); + }); + sendResponseLatch.countDown(); + responseLatch.await(); + stats = serviceC.transport.getStats(); // response has been received + assertEquals(2, stats.getRxCount()); + assertEquals(2, stats.getTxCount()); + assertEquals(46, stats.getRxSize().getBytes()); + assertEquals(91, stats.getTxSize().getBytes()); + } finally { + serviceC.close(); + } + } + + public void testTransportStatsWithException() throws Exception { + MockTransportService serviceC = build(Settings.builder().put("name", "TS_TEST").build(), version0, null, true); + CountDownLatch receivedLatch = new CountDownLatch(1); + CountDownLatch sendResponseLatch = new CountDownLatch(1); + Exception ex = new RuntimeException("boom"); + ex.setStackTrace(new StackTraceElement[0]); + serviceB.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME, + (request, channel) -> { + // don't block on a network thread here + threadPool.generic().execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (IOException e1) { + throw new UncheckedIOException(e1); + } + } + + @Override + protected void doRun() throws Exception { + receivedLatch.countDown(); + sendResponseLatch.await(); + onFailure(ex); + } + }); + }); + serviceC.start(); + serviceC.acceptIncomingRequests(); + CountDownLatch responseLatch = new CountDownLatch(1); + TransportResponseHandler transportResponseHandler = new TransportResponseHandler() { + @Override + public TransportResponse newInstance() { + return TransportResponse.Empty.INSTANCE; + } + + @Override + public void handleResponse(TransportResponse response) { + responseLatch.countDown(); + } + + @Override + public void handleException(TransportException exp) { + responseLatch.countDown(); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }; + + TransportStats stats = serviceC.transport.getStats(); // nothing transmitted / read yet + assertEquals(0, stats.getRxCount()); + assertEquals(0, stats.getTxCount()); + assertEquals(0, stats.getRxSize().getBytes()); + assertEquals(0, stats.getTxSize().getBytes()); + + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); + builder.addConnections(1, + TransportRequestOptions.Type.BULK, + TransportRequestOptions.Type.PING, + TransportRequestOptions.Type.RECOVERY, + TransportRequestOptions.Type.REG, + TransportRequestOptions.Type.STATE); + try (Transport.Connection connection = serviceC.openConnection(serviceB.getLocalNode(), builder.build())) { + assertBusy(() -> { // netty for instance invokes this concurrently so we better use assert busy here + TransportStats transportStats = serviceC.transport.getStats(); // request has ben send + assertEquals(1, transportStats.getRxCount()); + assertEquals(1, transportStats.getTxCount()); + assertEquals(25, transportStats.getRxSize().getBytes()); + assertEquals(45, transportStats.getTxSize().getBytes()); + }); + serviceC.sendRequest(connection, "action", new TestRequest("hello world"), TransportRequestOptions.EMPTY, + transportResponseHandler); + receivedLatch.await(); + assertBusy(() -> { // netty for instance invokes this concurrently so we better use assert busy here + TransportStats transportStats = serviceC.transport.getStats(); // request has ben send + assertEquals(1, 
transportStats.getRxCount()); + assertEquals(2, transportStats.getTxCount()); + assertEquals(25, transportStats.getRxSize().getBytes()); + assertEquals(91, transportStats.getTxSize().getBytes()); + }); + sendResponseLatch.countDown(); + responseLatch.await(); + stats = serviceC.transport.getStats(); // exception response has been received + assertEquals(2, stats.getRxCount()); + assertEquals(2, stats.getTxCount()); + int addressLen = serviceB.boundAddress().publishAddress().address().getAddress().getAddress().length; + // if we are bound to a IPv6 address the response address is serialized with the exception so it will be different depending + // on the stack. The emphemeral port will always be in the same range + assertEquals(185 + addressLen, stats.getRxSize().getBytes()); + assertEquals(91, stats.getTxSize().getBytes()); + } finally { + serviceC.close(); + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index 38a1701a7e1..94f5351cae7 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -248,7 +248,7 @@ public class MockTcpTransport extends TcpTransport } @Override - public long serverOpen() { + public long getNumOpenServerConnections() { return 1; } @@ -306,7 +306,9 @@ public class MockTcpTransport extends TcpTransport configureSocket(incomingSocket); synchronized (this) { if (isOpen.get()) { - incomingChannel = new MockChannel(incomingSocket, localAddress, profile, workerChannels::remove); + incomingChannel = new MockChannel(incomingSocket, + new InetSocketAddress(incomingSocket.getLocalAddress(), incomingSocket.getPort()), profile, + workerChannels::remove); //establish a happens-before edge between closing and accepting a new connection workerChannels.add(incomingChannel); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/AcceptingSelector.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/AcceptingSelector.java new file mode 100644 index 00000000000..c2c9ac03a2a --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/AcceptingSelector.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.transport.nio.channel.NioServerSocketChannel; + +import java.io.IOException; +import java.nio.channels.CancelledKeyException; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ClosedSelectorException; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.util.Iterator; +import java.util.Set; +import java.util.concurrent.ConcurrentLinkedQueue; + +/** + * Selector implementation that handles {@link NioServerSocketChannel}. It's main piece of functionality is + * accepting new channels. + */ +public class AcceptingSelector extends ESSelector { + + private final AcceptorEventHandler eventHandler; + private final ConcurrentLinkedQueue newChannels = new ConcurrentLinkedQueue<>(); + + public AcceptingSelector(AcceptorEventHandler eventHandler) throws IOException { + super(eventHandler); + this.eventHandler = eventHandler; + } + + public AcceptingSelector(AcceptorEventHandler eventHandler, Selector selector) throws IOException { + super(eventHandler, selector); + this.eventHandler = eventHandler; + } + + @Override + void doSelect(int timeout) throws IOException, ClosedSelectorException { + setUpNewServerChannels(); + + int ready = selector.select(timeout); + if (ready > 0) { + Set selectionKeys = selector.selectedKeys(); + Iterator keyIterator = selectionKeys.iterator(); + while (keyIterator.hasNext()) { + SelectionKey sk = keyIterator.next(); + keyIterator.remove(); + acceptChannel(sk); + } + } + } + + @Override + void cleanup() { + channelsToClose.addAll(registeredChannels); + closePendingChannels(); + } + + /** + * Registers a NioServerSocketChannel to be handled by this selector. The channel will by queued and + * eventually registered next time through the event loop. + * @param serverSocketChannel the channel to register + */ + public void registerServerChannel(NioServerSocketChannel serverSocketChannel) { + newChannels.add(serverSocketChannel); + wakeup(); + } + + private void setUpNewServerChannels() throws ClosedChannelException { + NioServerSocketChannel newChannel; + while ((newChannel = this.newChannels.poll()) != null) { + if (newChannel.register(this)) { + SelectionKey selectionKey = newChannel.getSelectionKey(); + selectionKey.attach(newChannel); + registeredChannels.add(newChannel); + eventHandler.serverChannelRegistered(newChannel); + } + } + } + + private void acceptChannel(SelectionKey sk) { + NioServerSocketChannel serverChannel = (NioServerSocketChannel) sk.attachment(); + if (sk.isValid()) { + try { + if (sk.isAcceptable()) { + try { + eventHandler.acceptChannel(serverChannel); + } catch (IOException e) { + eventHandler.acceptException(serverChannel, e); + } + } + } catch (CancelledKeyException ex) { + eventHandler.genericServerChannelException(serverChannel, ex); + } + } else { + eventHandler.genericServerChannelException(serverChannel, new CancelledKeyException()); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/AcceptorEventHandler.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/AcceptorEventHandler.java new file mode 100644 index 00000000000..7ce3b93e17c --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/AcceptorEventHandler.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
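AcceptingSelector only does work once a thread runs its loop and a server channel has been handed to it. A minimal wiring sketch, mirroring what NioTransport.doStart() does further down; logger, openChannels, selectorSupplier and serverSocketChannel are assumed to exist:

AcceptorEventHandler handler = new AcceptorEventHandler(logger, openChannels, selectorSupplier);
AcceptingSelector acceptor = new AcceptingSelector(handler);

// runLoop() blocks until the selector is closed, so it needs its own thread
Thread acceptorThread = new Thread(acceptor::runLoop, "test-acceptor");
acceptorThread.setDaemon(true);
acceptorThread.start();
acceptor.isRunningFuture().actionGet();      // wait until the loop is actually running

// any thread may register a server channel; it is picked up on the next loop iteration
acceptor.registerServerChannel(serverSocketChannel);

// close() wakes the loop; cleanup() then queues and closes every registered channel
acceptor.close();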
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.transport.nio.channel.ChannelFactory; +import org.elasticsearch.transport.nio.channel.NioServerSocketChannel; +import org.elasticsearch.transport.nio.channel.NioSocketChannel; +import org.elasticsearch.transport.nio.channel.SelectionKeyUtils; + +import java.io.IOException; +import java.util.function.Supplier; + +/** + * Event handler designed to handle events from server sockets + */ +public class AcceptorEventHandler extends EventHandler { + + private final Supplier selectorSupplier; + private final OpenChannels openChannels; + + public AcceptorEventHandler(Logger logger, OpenChannels openChannels, Supplier selectorSupplier) { + super(logger); + this.openChannels = openChannels; + this.selectorSupplier = selectorSupplier; + } + + /** + * This method is called when a NioServerSocketChannel is successfully registered. It should only be + * called once per channel. + * + * @param nioServerSocketChannel that was registered + */ + public void serverChannelRegistered(NioServerSocketChannel nioServerSocketChannel) { + SelectionKeyUtils.setAcceptInterested(nioServerSocketChannel); + openChannels.serverChannelOpened(nioServerSocketChannel); + } + + /** + * This method is called when a server channel signals it is ready to accept a connection. All of the + * accept logic should occur in this call. + * + * @param nioServerChannel that can accept a connection + */ + public void acceptChannel(NioServerSocketChannel nioServerChannel) throws IOException { + ChannelFactory channelFactory = nioServerChannel.getChannelFactory(); + NioSocketChannel nioSocketChannel = channelFactory.acceptNioChannel(nioServerChannel); + openChannels.acceptedChannelOpened(nioSocketChannel); + nioSocketChannel.getCloseFuture().setListener(openChannels::channelClosed); + selectorSupplier.get().registerSocketChannel(nioSocketChannel); + } + + /** + * This method is called when an attempt to accept a connection throws an exception. + * + * @param nioServerChannel that accepting a connection + * @param exception that occurred + */ + public void acceptException(NioServerSocketChannel nioServerChannel, Exception exception) { + logger.debug("exception while accepting new channel", exception); + } + + /** + * This method is called when handling an event from a channel fails due to an unexpected exception. + * An example would be if checking ready ops on a {@link java.nio.channels.SelectionKey} threw + * {@link java.nio.channels.CancelledKeyException}. 
+ * + * @param channel that caused the exception + * @param exception that was thrown + */ + public void genericServerChannelException(NioServerSocketChannel channel, Exception exception) { + logger.debug("event handling exception", exception); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/ESSelector.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/ESSelector.java new file mode 100644 index 00000000000..c5cf7e25931 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/ESSelector.java @@ -0,0 +1,196 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.transport.nio.channel.NioChannel; + +import java.io.Closeable; +import java.io.IOException; +import java.nio.channels.ClosedSelectorException; +import java.nio.channels.Selector; +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantLock; + +/** + * This is a basic selector abstraction used by {@link org.elasticsearch.transport.nio.NioTransport}. This + * selector wraps a raw nio {@link Selector}. When you call {@link #runLoop()}, the selector will run until + * {@link #close()} is called. This instance handles closing of channels. Users should call + * {@link #queueChannelClose(NioChannel)} to schedule a channel for close by this selector. + *

+ * Children of this class should implement the specific {@link #doSelect(int)} and {@link #cleanup()} + * functionality. + */ +public abstract class ESSelector implements Closeable { + + final Selector selector; + final ConcurrentLinkedQueue channelsToClose = new ConcurrentLinkedQueue<>(); + final Set registeredChannels = Collections.newSetFromMap(new ConcurrentHashMap()); + + private final EventHandler eventHandler; + private final ReentrantLock runLock = new ReentrantLock(); + private final AtomicBoolean isClosed = new AtomicBoolean(false); + private final PlainActionFuture isRunningFuture = PlainActionFuture.newFuture(); + private volatile Thread thread; + + ESSelector(EventHandler eventHandler) throws IOException { + this(eventHandler, Selector.open()); + } + + ESSelector(EventHandler eventHandler, Selector selector) throws IOException { + this.eventHandler = eventHandler; + this.selector = selector; + } + + /** + * Starts this selector. The selector will run until {@link #close()} or {@link #close(boolean)} is + * called. + */ + public void runLoop() { + if (runLock.tryLock()) { + isRunningFuture.onResponse(true); + try { + setThread(); + while (isOpen()) { + singleLoop(); + } + } finally { + try { + cleanup(); + } finally { + runLock.unlock(); + } + } + } else { + throw new IllegalStateException("selector is already running"); + } + } + + void singleLoop() { + try { + closePendingChannels(); + doSelect(300); + } catch (ClosedSelectorException e) { + if (isOpen()) { + throw e; + } + } catch (IOException e) { + eventHandler.selectException(e); + } catch (Exception e) { + eventHandler.uncaughtException(e); + } + } + + /** + * Should implement the specific select logic. This will be called once per {@link #singleLoop()} + * + * @param timeout to pass to the raw select operation + * @throws IOException thrown by the raw select operation + * @throws ClosedSelectorException thrown if the raw selector is closed + */ + abstract void doSelect(int timeout) throws IOException, ClosedSelectorException; + + void setThread() { + thread = Thread.currentThread(); + } + + public boolean isOnCurrentThread() { + return Thread.currentThread() == thread; + } + + public void wakeup() { + // TODO: Do I need the wakeup optimizations that some other libraries use? + selector.wakeup(); + } + + public Set getRegisteredChannels() { + return registeredChannels; + } + + @Override + public void close() throws IOException { + close(false); + } + + public void close(boolean shouldInterrupt) throws IOException { + if (isClosed.compareAndSet(false, true)) { + selector.close(); + if (shouldInterrupt && thread != null) { + thread.interrupt(); + } else { + wakeup(); + } + runLock.lock(); // wait for the shutdown to complete + } + } + + public void queueChannelClose(NioChannel channel) { + ensureOpen(); + channelsToClose.offer(channel); + wakeup(); + } + + void closePendingChannels() { + NioChannel channel; + while ((channel = channelsToClose.poll()) != null) { + closeChannel(channel); + } + } + + + /** + * Called once as the selector is being closed. 
+ */ + abstract void cleanup(); + + public Selector rawSelector() { + return selector; + } + + public boolean isOpen() { + return isClosed.get() == false; + } + + public boolean isRunning() { + return runLock.isLocked(); + } + + public PlainActionFuture isRunningFuture() { + return isRunningFuture; + } + + private void closeChannel(NioChannel channel) { + try { + eventHandler.handleClose(channel); + } finally { + registeredChannels.remove(channel); + } + } + + private void ensureOpen() { + if (isClosed.get()) { + throw new IllegalStateException("selector is already closed"); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/EventHandler.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/EventHandler.java new file mode 100644 index 00000000000..6ecf36343f7 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/EventHandler.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.transport.nio.channel.CloseFuture; +import org.elasticsearch.transport.nio.channel.NioChannel; +import org.elasticsearch.transport.nio.channel.NioSocketChannel; + +import java.io.IOException; +import java.nio.channels.Selector; + +public abstract class EventHandler { + + protected final Logger logger; + + public EventHandler(Logger logger) { + this.logger = logger; + } + + /** + * This method handles an IOException that was thrown during a call to {@link Selector#select(long)}. + * + * @param exception that was uncaught + */ + public void selectException(IOException exception) { + logger.warn("io exception during select", exception); + } + + /** + * This method handles an exception that was uncaught during a select loop. 
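The lifecycle above is the contract every concrete selector follows: one dedicated thread owns the loop, every other thread only wakes it or queues work for it. A sketch of that lifecycle, where selector stands for any AcceptingSelector or SocketSelector:

// start: the owning thread runs the loop until the selector is closed
new Thread(selector::runLoop, "selector-thread").start();
selector.isRunningFuture().actionGet();

// from any other thread: schedule a channel close, performed by the selector thread
selector.queueChannelClose(channel);

// stop: closes the raw selector, wakes the loop (close(true) interrupts it instead),
// then blocks on the run lock until cleanup() has finished
selector.close();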
+ * + * @param exception that was uncaught + */ + public void uncaughtException(Exception exception) { + Thread thread = Thread.currentThread(); + thread.getUncaughtExceptionHandler().uncaughtException(thread, exception); + } + + /** + * This method handles the closing of an NioChannel + * + * @param channel that should be closed + */ + public void handleClose(NioChannel channel) { + channel.closeFromSelector(); + CloseFuture closeFuture = channel.getCloseFuture(); + assert closeFuture.isDone() : "Should always be done as we are on the selector thread"; + IOException closeException = closeFuture.getCloseException(); + if (closeException != null) { + logger.trace("exception while closing channel", closeException); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/NetworkBytesReference.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/NetworkBytesReference.java new file mode 100644 index 00000000000..cbccd7333d6 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/NetworkBytesReference.java @@ -0,0 +1,157 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
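EventHandler is the extension point that AcceptorEventHandler above and SocketEventHandler further down build on; selectors never log or rethrow on their own, they delegate here. A hypothetical minimal subclass, for example a test handler that fails fast instead of deferring to the thread's uncaught-exception handler (entirely illustrative):

class FailFastEventHandler extends EventHandler {
    FailFastEventHandler(Logger logger) {
        super(logger);
    }

    @Override
    public void uncaughtException(Exception exception) {
        // surface the error immediately rather than handing it to the thread's handler
        throw new AssertionError("unexpected exception on selector thread", exception);
    }
}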
+ */ + +package org.elasticsearch.transport.nio; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; + +import java.nio.ByteBuffer; +import java.util.Iterator; + +public class NetworkBytesReference extends BytesReference { + + private final BytesArray bytesArray; + private final ByteBuffer writeBuffer; + private final ByteBuffer readBuffer; + + private int writeIndex; + private int readIndex; + + public NetworkBytesReference(BytesArray bytesArray, int writeIndex, int readIndex) { + this.bytesArray = bytesArray; + this.writeIndex = writeIndex; + this.readIndex = readIndex; + this.writeBuffer = ByteBuffer.wrap(bytesArray.array()); + this.readBuffer = ByteBuffer.wrap(bytesArray.array()); + } + + public static NetworkBytesReference wrap(BytesArray bytesArray) { + return wrap(bytesArray, 0, 0); + } + + public static NetworkBytesReference wrap(BytesArray bytesArray, int writeIndex, int readIndex) { + if (readIndex > writeIndex) { + throw new IndexOutOfBoundsException("Read index [" + readIndex + "] was greater than write index [" + writeIndex + "]"); + } + return new NetworkBytesReference(bytesArray, writeIndex, readIndex); + } + + @Override + public byte get(int index) { + return bytesArray.get(index); + } + + @Override + public int length() { + return bytesArray.length(); + } + + @Override + public NetworkBytesReference slice(int from, int length) { + BytesReference ref = bytesArray.slice(from, length); + BytesArray newBytesArray; + if (ref instanceof BytesArray) { + newBytesArray = (BytesArray) ref; + } else { + newBytesArray = new BytesArray(ref.toBytesRef()); + } + + int newReadIndex = Math.min(Math.max(readIndex - from, 0), length); + int newWriteIndex = Math.min(Math.max(writeIndex - from, 0), length); + + return wrap(newBytesArray, newWriteIndex, newReadIndex); + } + + @Override + public BytesRef toBytesRef() { + return bytesArray.toBytesRef(); + } + + @Override + public long ramBytesUsed() { + return bytesArray.ramBytesUsed(); + } + + public int getWriteIndex() { + return writeIndex; + } + + public void incrementWrite(int delta) { + int newWriteIndex = writeIndex + delta; + if (newWriteIndex > bytesArray.length()) { + throw new IndexOutOfBoundsException("New write index [" + newWriteIndex + "] would be greater than length" + + " [" + bytesArray.length() + "]"); + } + + writeIndex = newWriteIndex; + } + + public int getWriteRemaining() { + return bytesArray.length() - writeIndex; + } + + public boolean hasWriteRemaining() { + return getWriteRemaining() > 0; + } + + public int getReadIndex() { + return readIndex; + } + + public void incrementRead(int delta) { + int newReadIndex = readIndex + delta; + if (newReadIndex > writeIndex) { + throw new IndexOutOfBoundsException("New read index [" + newReadIndex + "] would be greater than write" + + " index [" + writeIndex + "]"); + } + readIndex = newReadIndex; + } + + public int getReadRemaining() { + return writeIndex - readIndex; + } + + public boolean hasReadRemaining() { + return getReadRemaining() > 0; + } + + public ByteBuffer getWriteByteBuffer() { + writeBuffer.position(bytesArray.offset() + writeIndex); + writeBuffer.limit(bytesArray.offset() + bytesArray.length()); + return writeBuffer; + } + + public ByteBuffer getReadByteBuffer() { + readBuffer.position(bytesArray.offset() + readIndex); + readBuffer.limit(bytesArray.offset() + writeIndex); + return readBuffer; + } + + public static void vectorizedIncrementReadIndexes(Iterable references, 
int delta) { + Iterator refs = references.iterator(); + while (delta != 0) { + NetworkBytesReference ref = refs.next(); + int amountToInc = Math.min(ref.getReadRemaining(), delta); + ref.incrementRead(amountToInc); + delta -= amountToInc; + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioClient.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioClient.java new file mode 100644 index 00000000000..bc06ad0bc34 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioClient.java @@ -0,0 +1,155 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.nio.channel.ChannelFactory; +import org.elasticsearch.transport.nio.channel.ConnectFuture; +import org.elasticsearch.transport.nio.channel.NioChannel; +import org.elasticsearch.transport.nio.channel.NioSocketChannel; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.LockSupport; +import java.util.function.Consumer; +import java.util.function.Supplier; + +public class NioClient { + + private static final int CLOSED = -1; + + private final Logger logger; + private final OpenChannels openChannels; + private final Supplier selectorSupplier; + private final TimeValue defaultConnectTimeout; + private final ChannelFactory channelFactory; + private final Semaphore semaphore = new Semaphore(Integer.MAX_VALUE); + + public NioClient(Logger logger, OpenChannels openChannels, Supplier selectorSupplier, TimeValue connectTimeout, + ChannelFactory channelFactory) { + this.logger = logger; + this.openChannels = openChannels; + this.selectorSupplier = selectorSupplier; + this.defaultConnectTimeout = connectTimeout; + this.channelFactory = channelFactory; + } + + public boolean connectToChannels(DiscoveryNode node, NioSocketChannel[] channels, TimeValue connectTimeout, + Consumer closeListener) throws IOException { + boolean allowedToConnect = semaphore.tryAcquire(); + if (allowedToConnect == false) { + return false; + } + + final ArrayList connections = new ArrayList<>(channels.length); + connectTimeout = getConnectTimeout(connectTimeout); + final InetSocketAddress address = node.getAddress().address(); + try { + for (int i = 0; i < channels.length; i++) { + SocketSelector 
socketSelector = selectorSupplier.get(); + NioSocketChannel nioSocketChannel = channelFactory.openNioChannel(address); + openChannels.clientChannelOpened(nioSocketChannel); + nioSocketChannel.getCloseFuture().setListener(closeListener); + connections.add(nioSocketChannel); + socketSelector.registerSocketChannel(nioSocketChannel); + } + + Exception ex = null; + boolean allConnected = true; + for (NioSocketChannel socketChannel : connections) { + ConnectFuture connectFuture = socketChannel.getConnectFuture(); + boolean success = connectFuture.awaitConnectionComplete(connectTimeout.getMillis(), TimeUnit.MILLISECONDS); + if (success == false) { + allConnected = false; + Exception exception = connectFuture.getException(); + if (exception != null) { + ex = exception; + break; + } + } + } + + if (allConnected == false) { + if (ex == null) { + throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]"); + } else { + throw new ConnectTransportException(node, "connect_exception", ex); + } + } + addConnectionsToList(channels, connections); + return true; + + } catch (IOException | RuntimeException e) { + closeChannels(connections, e); + throw e; + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + closeChannels(connections, e); + throw new ElasticsearchException(e); + } finally { + semaphore.release(); + } + } + + public void close() { + semaphore.acquireUninterruptibly(Integer.MAX_VALUE); + } + + private TimeValue getConnectTimeout(TimeValue connectTimeout) { + if (connectTimeout != null && connectTimeout.equals(defaultConnectTimeout) == false) { + return connectTimeout; + } else { + return defaultConnectTimeout; + } + } + + private static void addConnectionsToList(NioSocketChannel[] channels, ArrayList connections) { + final Iterator iterator = connections.iterator(); + for (int i = 0; i < channels.length; i++) { + assert iterator.hasNext(); + channels[i] = iterator.next(); + } + assert iterator.hasNext() == false : "not all created connection have been consumed"; + } + + private void closeChannels(ArrayList connections, Exception e) { + for (final NioSocketChannel socketChannel : connections) { + try { + socketChannel.closeAsync().awaitClose(); + } catch (InterruptedException inner) { + logger.trace("exception while closing channel", e); + e.addSuppressed(inner); + Thread.currentThread().interrupt(); + } catch (Exception inner) { + logger.trace("exception while closing channel", e); + e.addSuppressed(inner); + } + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioShutdown.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioShutdown.java new file mode 100644 index 00000000000..8dc87f80f8a --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioShutdown.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
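connectToChannels() is all-or-nothing: it opens one channel per slot, waits on every ConnectFuture, and either fills the array or closes whatever it opened and throws. A sketch of the calling convention the transport uses further down; node, profile and closeListener are assumed:

NioSocketChannel[] channels = new NioSocketChannel[profile.getNumConnections()];
boolean connected = client.connectToChannels(node, channels, profile.getConnectTimeout(), closeListener);
if (connected == false) {
    // the semaphore was drained by close(): the client is shutting down
    throw new ElasticsearchException("client is shutdown");
}
// on success, every slot of 'channels' holds a connected NioSocketChannel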
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.concurrent.CountDownLatch; + +public class NioShutdown { + + private final Logger logger; + + public NioShutdown(Logger logger) { + this.logger = logger; + } + + void orderlyShutdown(OpenChannels openChannels, NioClient client, ArrayList<AcceptingSelector> acceptors, + ArrayList<SocketSelector> socketSelectors) { + // Close the client. This ensures that no new send connections will be opened. Client could be null if an exception was + // thrown on start up + if (client != null) { + client.close(); + } + + // Start by closing the server channels. Once these are closed, we are guaranteed to not accept new connections + openChannels.closeServerChannels(); + + for (AcceptingSelector acceptor : acceptors) { + shutdownSelector(acceptor); + } + + openChannels.close(); + + for (SocketSelector selector : socketSelectors) { + shutdownSelector(selector); + } + } + } + private void shutdownSelector(ESSelector selector) { + try { + selector.close(); + } catch (IOException | ElasticsearchException e) { + logger.warn("unexpected exception while stopping selector", e); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java new file mode 100644 index 00000000000..05c818476a1 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -0,0 +1,289 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
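The shutdown order in orderlyShutdown() is deliberate: stop the sources of new work before tearing down the threads that service it. A sketch of the call as the transport makes it during stop; the arguments are the collections built up in doStart():

// 1. client.close()                      - no new outbound connections
// 2. openChannels.closeServerChannels()  - no new inbound connections
// 3. shut down the acceptors, 4. close the remaining channels, 5. shut down the socket selectors
NioShutdown nioShutdown = new NioShutdown(logger);
nioShutdown.orderlyShutdown(openChannels, client, acceptors, socketSelectors);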
+ */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectionProfile; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.transport.TransportSettings; +import org.elasticsearch.transport.nio.channel.ChannelFactory; +import org.elasticsearch.transport.nio.channel.NioChannel; +import org.elasticsearch.transport.nio.channel.NioServerSocketChannel; +import org.elasticsearch.transport.nio.channel.NioSocketChannel; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ThreadFactory; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static org.elasticsearch.common.settings.Setting.intSetting; +import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; +import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; + +public class NioTransport extends TcpTransport { + + // TODO: Need to add to places where we check if transport thread + public static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = "transport_worker"; + public static final String TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX = "transport_acceptor"; + + public static final Setting NIO_WORKER_COUNT = + new Setting<>("transport.nio.worker_count", + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), + (s) -> Setting.parseInt(s, 1, "transport.nio.worker_count"), Setting.Property.NodeScope); + + public static final Setting NIO_ACCEPTOR_COUNT = + intSetting("transport.nio.acceptor_count", 1, 1, Setting.Property.NodeScope); + + private final TcpReadHandler tcpReadHandler = new TcpReadHandler(this); + private final BigArrays bigArrays; + private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); + private final OpenChannels openChannels = new OpenChannels(logger); + private final ArrayList acceptors = new ArrayList<>(); + private final ArrayList socketSelectors = new ArrayList<>(); + private NioClient client; + private int acceptorNumber; + + public NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, + NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { + super("nio", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); + this.bigArrays = bigArrays; + } + + @Override + public long getNumOpenServerConnections() { + return openChannels.serverChannelsCount(); + } + + @Override + protected InetSocketAddress getLocalAddress(NioChannel channel) { + return channel.getLocalAddress(); + } + + @Override + protected 
NioServerSocketChannel bind(String name, InetSocketAddress address) throws IOException { + ChannelFactory channelFactory = this.profileToChannelFactory.get(name); + NioServerSocketChannel serverSocketChannel = channelFactory.openNioServerSocketChannel(name, address); + acceptors.get(++acceptorNumber % NioTransport.NIO_ACCEPTOR_COUNT.get(settings)).registerServerChannel(serverSocketChannel); + return serverSocketChannel; + } + + @Override + protected void closeChannels(List channels) throws IOException { + IOException closingExceptions = null; + for (final NioChannel channel : channels) { + if (channel != null && channel.isOpen()) { + try { + channel.closeAsync().awaitClose(); + } catch (Exception e) { + if (closingExceptions == null) { + closingExceptions = new IOException("failed to close channels"); + } + closingExceptions.addSuppressed(e.getCause()); + } + } + } + + if (closingExceptions != null) { + throw closingExceptions; + } + } + + @Override + protected void sendMessage(NioChannel channel, BytesReference reference, ActionListener listener) { + if (channel instanceof NioSocketChannel) { + NioSocketChannel nioSocketChannel = (NioSocketChannel) channel; + nioSocketChannel.getWriteContext().sendMessage(reference, listener); + } else { + logger.error("cannot send message to channel of this type [{}]", channel.getClass()); + } + } + + @Override + protected NodeChannels connectToChannels(DiscoveryNode node, ConnectionProfile profile, Consumer onChannelClose) + throws IOException { + NioSocketChannel[] channels = new NioSocketChannel[profile.getNumConnections()]; + ClientChannelCloseListener closeListener = new ClientChannelCloseListener(onChannelClose); + boolean connected = client.connectToChannels(node, channels, profile.getConnectTimeout(), closeListener); + if (connected == false) { + throw new ElasticsearchException("client is shutdown"); + } + return new NodeChannels(node, channels, profile); + } + + @Override + protected boolean isOpen(NioChannel channel) { + return channel.isOpen(); + } + + @Override + protected void doStart() { + boolean success = false; + try { + if (NetworkService.NETWORK_SERVER.get(settings)) { + int workerCount = NioTransport.NIO_WORKER_COUNT.get(settings); + for (int i = 0; i < workerCount; ++i) { + SocketSelector selector = new SocketSelector(getSocketEventHandler()); + socketSelectors.add(selector); + } + + int acceptorCount = NioTransport.NIO_ACCEPTOR_COUNT.get(settings); + for (int i = 0; i < acceptorCount; ++i) { + Supplier selectorSupplier = new RoundRobinSelectorSupplier(socketSelectors); + AcceptorEventHandler eventHandler = new AcceptorEventHandler(logger, openChannels, selectorSupplier); + AcceptingSelector acceptor = new AcceptingSelector(eventHandler); + acceptors.add(acceptor); + } + // loop through all profiles and start them up, special handling for default one + for (Map.Entry entry : buildProfileSettings().entrySet()) { + // merge fallback settings with default settings with profile settings so we have complete settings with default values + final Settings settings = Settings.builder() + .put(createFallbackSettings()) + .put(entry.getValue()).build(); + profileToChannelFactory.putIfAbsent(entry.getKey(), new ChannelFactory(settings, tcpReadHandler)); + bindServer(entry.getKey(), settings); + } + } + client = createClient(); + + for (SocketSelector selector : socketSelectors) { + if (selector.isRunning() == false) { + ThreadFactory threadFactory = daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX); + 
threadFactory.newThread(selector::runLoop).start(); + selector.isRunningFuture().actionGet(); + } + } + + for (AcceptingSelector acceptor : acceptors) { + if (acceptor.isRunning() == false) { + ThreadFactory threadFactory = daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX); + threadFactory.newThread(acceptor::runLoop).start(); + acceptor.isRunningFuture().actionGet(); + } + } + + super.doStart(); + success = true; + } catch (IOException e) { + throw new ElasticsearchException(e); + } finally { + if (success == false) { + doStop(); + } + } + } + + @Override + protected void stopInternal() { + NioShutdown nioShutdown = new NioShutdown(logger); + nioShutdown.orderlyShutdown(openChannels, client, acceptors, socketSelectors); + + profileToChannelFactory.clear(); + socketSelectors.clear(); + } + + protected SocketEventHandler getSocketEventHandler() { + return new SocketEventHandler(logger, this::exceptionCaught); + } + + final void exceptionCaught(NioSocketChannel channel, Throwable cause) { + final Throwable unwrapped = ExceptionsHelper.unwrap(cause, ElasticsearchException.class); + final Throwable t = unwrapped != null ? unwrapped : cause; + onException(channel, t instanceof Exception ? (Exception) t : new ElasticsearchException(t)); + } + + private Settings createFallbackSettings() { + Settings.Builder fallbackSettingsBuilder = Settings.builder(); + + List fallbackBindHost = TransportSettings.BIND_HOST.get(settings); + if (fallbackBindHost.isEmpty() == false) { + fallbackSettingsBuilder.putArray("bind_host", fallbackBindHost); + } + + List fallbackPublishHost = TransportSettings.PUBLISH_HOST.get(settings); + if (fallbackPublishHost.isEmpty() == false) { + fallbackSettingsBuilder.putArray("publish_host", fallbackPublishHost); + } + + boolean fallbackTcpNoDelay = settings.getAsBoolean("transport.nio.tcp_no_delay", + NetworkService.TcpSettings.TCP_NO_DELAY.get(settings)); + fallbackSettingsBuilder.put("tcp_no_delay", fallbackTcpNoDelay); + + boolean fallbackTcpKeepAlive = settings.getAsBoolean("transport.nio.tcp_keep_alive", + NetworkService.TcpSettings.TCP_KEEP_ALIVE.get(settings)); + fallbackSettingsBuilder.put("tcp_keep_alive", fallbackTcpKeepAlive); + + boolean fallbackReuseAddress = settings.getAsBoolean("transport.nio.reuse_address", + NetworkService.TcpSettings.TCP_REUSE_ADDRESS.get(settings)); + fallbackSettingsBuilder.put("reuse_address", fallbackReuseAddress); + + ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.nio.tcp_send_buffer_size", + TCP_SEND_BUFFER_SIZE.get(settings)); + if (fallbackTcpSendBufferSize.getBytes() >= 0) { + fallbackSettingsBuilder.put("tcp_send_buffer_size", fallbackTcpSendBufferSize); + } + + ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.nio.tcp_receive_buffer_size", + TCP_RECEIVE_BUFFER_SIZE.get(settings)); + if (fallbackTcpBufferSize.getBytes() >= 0) { + fallbackSettingsBuilder.put("tcp_receive_buffer_size", fallbackTcpBufferSize); + } + + return fallbackSettingsBuilder.build(); + } + + private NioClient createClient() { + Supplier selectorSupplier = new RoundRobinSelectorSupplier(socketSelectors); + ChannelFactory channelFactory = new ChannelFactory(settings, tcpReadHandler); + return new NioClient(logger, openChannels, selectorSupplier, defaultConnectionProfile.getConnectTimeout(), channelFactory); + } + + class ClientChannelCloseListener implements Consumer { + + private final Consumer consumer; + + private ClientChannelCloseListener(Consumer consumer) { + this.consumer = 
consumer; + } + + @Override + public void accept(final NioChannel channel) { + consumer.accept(channel); + openChannels.channelClosed(channel); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/OpenChannels.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/OpenChannels.java new file mode 100644 index 00000000000..eea353a6c14 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/OpenChannels.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.transport.nio.channel.NioChannel; +import org.elasticsearch.transport.nio.channel.NioServerSocketChannel; +import org.elasticsearch.transport.nio.channel.NioSocketChannel; + +import java.util.HashSet; +import java.util.Map; +import java.util.concurrent.ConcurrentMap; + +import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; + +public class OpenChannels implements Releasable { + + // TODO: Maybe set concurrency levels? 
+ private final ConcurrentMap openClientChannels = newConcurrentMap(); + private final ConcurrentMap openAcceptedChannels = newConcurrentMap(); + private final ConcurrentMap openServerChannels = newConcurrentMap(); + + private final Logger logger; + + public OpenChannels(Logger logger) { + this.logger = logger; + } + + public void serverChannelOpened(NioServerSocketChannel channel) { + boolean added = openServerChannels.putIfAbsent(channel, System.nanoTime()) == null; + if (added && logger.isTraceEnabled()) { + logger.trace("server channel opened: {}", channel); + } + } + + public long serverChannelsCount() { + return openServerChannels.size(); + } + + public void acceptedChannelOpened(NioSocketChannel channel) { + boolean added = openAcceptedChannels.putIfAbsent(channel, System.nanoTime()) == null; + if (added && logger.isTraceEnabled()) { + logger.trace("accepted channel opened: {}", channel); + } + } + + public HashSet getAcceptedChannels() { + return new HashSet<>(openAcceptedChannels.keySet()); + } + + public void clientChannelOpened(NioSocketChannel channel) { + boolean added = openClientChannels.putIfAbsent(channel, System.nanoTime()) == null; + if (added && logger.isTraceEnabled()) { + logger.trace("client channel opened: {}", channel); + } + } + + public void channelClosed(NioChannel channel) { + boolean removed; + if (channel instanceof NioServerSocketChannel) { + removed = openServerChannels.remove(channel) != null; + } else { + NioSocketChannel socketChannel = (NioSocketChannel) channel; + removed = openClientChannels.remove(socketChannel) != null; + if (removed == false) { + removed = openAcceptedChannels.remove(socketChannel) != null; + } + } + if (removed && logger.isTraceEnabled()) { + logger.trace("channel closed: {}", channel); + } + } + + public void closeServerChannels() { + for (NioServerSocketChannel channel : openServerChannels.keySet()) { + ensureClosedInternal(channel); + } + + openServerChannels.clear(); + } + + @Override + public void close() { + for (NioSocketChannel channel : openClientChannels.keySet()) { + ensureClosedInternal(channel); + } + for (NioSocketChannel channel : openAcceptedChannels.keySet()) { + ensureClosedInternal(channel); + } + + openClientChannels.clear(); + openAcceptedChannels.clear(); + } + + private void ensureClosedInternal(NioChannel channel) { + try { + channel.closeAsync().get(); + } catch (Exception e) { + logger.trace("exception while closing channels", e); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/RoundRobinSelectorSupplier.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/RoundRobinSelectorSupplier.java new file mode 100644 index 00000000000..108242b1e0e --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/RoundRobinSelectorSupplier.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
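OpenChannels is the transport's bookkeeping for every live channel, split by how the channel came to exist: bound server channels, accepted inbound channels, and outbound client channels. A sketch of the intended call sequence around a channel's life; the channel variables are assumed:

OpenChannels openChannels = new OpenChannels(logger);

openChannels.serverChannelOpened(serverChannel);       // a bound server channel, registered with an acceptor
openChannels.acceptedChannelOpened(inboundChannel);     // after an accept
openChannels.clientChannelOpened(outboundChannel);      // after an outbound connect

// close listeners route back here so the maps stay accurate
inboundChannel.getCloseFuture().setListener(openChannels::channelClosed);

long boundChannels = openChannels.serverChannelsCount();

// shutdown: server channels first, then everything that is still open
openChannels.closeServerChannels();
openChannels.close();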
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import java.util.ArrayList; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; + +public class RoundRobinSelectorSupplier implements Supplier { + + private final ArrayList selectors; + private final int count; + private AtomicInteger counter = new AtomicInteger(0); + + public RoundRobinSelectorSupplier(ArrayList selectors) { + this.count = selectors.size(); + this.selectors = selectors; + } + + public SocketSelector get() { + return selectors.get(counter.getAndIncrement() % count); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/SocketEventHandler.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/SocketEventHandler.java new file mode 100644 index 00000000000..6905f7957b3 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/SocketEventHandler.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.transport.nio.channel.NioSocketChannel; +import org.elasticsearch.transport.nio.channel.SelectionKeyUtils; +import org.elasticsearch.transport.nio.channel.WriteContext; + +import java.io.IOException; +import java.util.function.BiConsumer; + +/** + * Event handler designed to handle events from non-server sockets + */ +public class SocketEventHandler extends EventHandler { + + private final BiConsumer exceptionHandler; + private final Logger logger; + + public SocketEventHandler(Logger logger, BiConsumer exceptionHandler) { + super(logger); + this.exceptionHandler = exceptionHandler; + this.logger = logger; + } + + /** + * This method is called when a NioSocketChannel is successfully registered. It should only be called + * once per channel. + * + * @param channel that was registered + */ + public void handleRegistration(NioSocketChannel channel) { + SelectionKeyUtils.setConnectAndReadInterested(channel); + } + + /** + * This method is called when an attempt to register a channel throws an exception. + * + * @param channel that was registered + * @param exception that occurred + */ + public void registrationException(NioSocketChannel channel, Exception exception) { + logger.trace("failed to register channel", exception); + exceptionCaught(channel, exception); + } + + /** + * This method is called when a NioSocketChannel is successfully connected. It should only be called + * once per channel. 
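The supplier just walks an AtomicInteger over the selector list, which is how accepted and outbound channels get spread evenly across the worker threads. A short usage sketch with two selectors assumed:

ArrayList<SocketSelector> selectors = new ArrayList<>();
selectors.add(selectorA);
selectors.add(selectorB);

Supplier<SocketSelector> supplier = new RoundRobinSelectorSupplier(selectors);
supplier.get(); // selectorA
supplier.get(); // selectorB
supplier.get(); // selectorA again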
+ * + * @param channel that was registered + */ + public void handleConnect(NioSocketChannel channel) { + SelectionKeyUtils.removeConnectInterested(channel); + } + + /** + * This method is called when an attempt to connect a channel throws an exception. + * + * @param channel that was connecting + * @param exception that occurred + */ + public void connectException(NioSocketChannel channel, Exception exception) { + logger.trace("failed to connect to channel", exception); + exceptionCaught(channel, exception); + + } + + /** + * This method is called when a channel signals it is ready for be read. All of the read logic should + * occur in this call. + * + * @param channel that can be read + */ + public void handleRead(NioSocketChannel channel) throws IOException { + int bytesRead = channel.getReadContext().read(); + if (bytesRead == -1) { + handleClose(channel); + } + } + + /** + * This method is called when an attempt to read from a channel throws an exception. + * + * @param channel that was being read + * @param exception that occurred + */ + public void readException(NioSocketChannel channel, Exception exception) { + logger.trace("failed to read from channel", exception); + exceptionCaught(channel, exception); + } + + /** + * This method is called when a channel signals it is ready to receive writes. All of the write logic + * should occur in this call. + * + * @param channel that can be read + */ + public void handleWrite(NioSocketChannel channel) throws IOException { + WriteContext channelContext = channel.getWriteContext(); + channelContext.flushChannel(); + if (channelContext.hasQueuedWriteOps()) { + SelectionKeyUtils.setWriteInterested(channel); + } else { + SelectionKeyUtils.removeWriteInterested(channel); + } + } + + /** + * This method is called when an attempt to write to a channel throws an exception. + * + * @param channel that was being written to + * @param exception that occurred + */ + public void writeException(NioSocketChannel channel, Exception exception) { + logger.trace("failed to write to channel", exception); + exceptionCaught(channel, exception); + } + + /** + * This method is called when handling an event from a channel fails due to an unexpected exception. + * An example would be if checking ready ops on a {@link java.nio.channels.SelectionKey} threw + * {@link java.nio.channels.CancelledKeyException}. + * + * @param channel that caused the exception + * @param exception that was thrown + */ + public void genericChannelException(NioSocketChannel channel, Exception exception) { + logger.trace("event handling failed", exception); + exceptionCaught(channel, exception); + } + + private void exceptionCaught(NioSocketChannel channel, Exception e) { + exceptionHandler.accept(channel, e); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/SocketSelector.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/SocketSelector.java new file mode 100644 index 00000000000..24f68504d8f --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/SocketSelector.java @@ -0,0 +1,216 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
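The exception callbacks in SocketEventHandler all funnel into the BiConsumer it was constructed with, which is how the transport turns low-level channel errors into onException() calls. A sketch of that wiring together with the SocketSelector defined next; the lambda body is illustrative:

SocketEventHandler handler = new SocketEventHandler(logger,
    (channel, cause) -> logger.warn("channel failed: " + channel, cause));
SocketSelector workerSelector = new SocketSelector(handler);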
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.transport.nio.channel.NioSocketChannel; +import org.elasticsearch.transport.nio.channel.SelectionKeyUtils; +import org.elasticsearch.transport.nio.channel.WriteContext; + +import java.io.IOException; +import java.nio.channels.CancelledKeyException; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ClosedSelectorException; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.util.Iterator; +import java.util.Set; +import java.util.concurrent.ConcurrentLinkedQueue; + +/** + * Selector implementation that handles {@link NioSocketChannel}. It's main piece of functionality is + * handling connect, read, and write events. + */ +public class SocketSelector extends ESSelector { + + private final ConcurrentLinkedQueue newChannels = new ConcurrentLinkedQueue<>(); + private final ConcurrentLinkedQueue queuedWrites = new ConcurrentLinkedQueue<>(); + private final SocketEventHandler eventHandler; + + public SocketSelector(SocketEventHandler eventHandler) throws IOException { + super(eventHandler); + this.eventHandler = eventHandler; + } + + public SocketSelector(SocketEventHandler eventHandler, Selector selector) throws IOException { + super(eventHandler, selector); + this.eventHandler = eventHandler; + } + + @Override + void doSelect(int timeout) throws IOException, ClosedSelectorException { + setUpNewChannels(); + handleQueuedWrites(); + + int ready = selector.select(timeout); + if (ready > 0) { + Set selectionKeys = selector.selectedKeys(); + processKeys(selectionKeys); + } + + } + + @Override + void cleanup() { + WriteOperation op; + while ((op = queuedWrites.poll()) != null) { + op.getListener().onFailure(new ClosedSelectorException()); + } + channelsToClose.addAll(newChannels); + channelsToClose.addAll(registeredChannels); + closePendingChannels(); + } + + /** + * Registers a NioSocketChannel to be handled by this selector. The channel will by queued and eventually + * registered next time through the event loop. + * @param nioSocketChannel the channel to register + */ + public void registerSocketChannel(NioSocketChannel nioSocketChannel) { + newChannels.offer(nioSocketChannel); + wakeup(); + } + + + /** + * Queues a write operation to be handled by the event loop. This can be called by any thread and is the + * api available for non-selector threads to schedule writes. + * + * @param writeOperation to be queued + */ + public void queueWrite(WriteOperation writeOperation) { + queuedWrites.offer(writeOperation); + if (isOpen() == false) { + boolean wasRemoved = queuedWrites.remove(writeOperation); + if (wasRemoved) { + writeOperation.getListener().onFailure(new ClosedSelectorException()); + } + } else { + wakeup(); + } + } + + /** + * Queues a write operation directly in a channel's buffer. Channel buffers are only safe to be accessed + * by the selector thread. As a result, this method should only be called by the selector thread. 
+ * + * @param writeOperation to be queued in a channel's buffer + */ + public void queueWriteInChannelBuffer(WriteOperation writeOperation) { + assert isOnCurrentThread() : "Must be on selector thread"; + NioSocketChannel channel = writeOperation.getChannel(); + WriteContext context = channel.getWriteContext(); + try { + SelectionKeyUtils.setWriteInterested(channel); + context.queueWriteOperations(writeOperation); + } catch (Exception e) { + writeOperation.getListener().onFailure(e); + } + } + + private void processKeys(Set selectionKeys) { + Iterator keyIterator = selectionKeys.iterator(); + while (keyIterator.hasNext()) { + SelectionKey sk = keyIterator.next(); + keyIterator.remove(); + NioSocketChannel nioSocketChannel = (NioSocketChannel) sk.attachment(); + if (sk.isValid()) { + try { + int ops = sk.readyOps(); + if ((ops & SelectionKey.OP_CONNECT) != 0) { + attemptConnect(nioSocketChannel); + } + + if (nioSocketChannel.isConnectComplete()) { + if ((ops & SelectionKey.OP_WRITE) != 0) { + handleWrite(nioSocketChannel); + } + + if ((ops & SelectionKey.OP_READ) != 0) { + handleRead(nioSocketChannel); + } + } + } catch (CancelledKeyException e) { + eventHandler.genericChannelException(nioSocketChannel, e); + } + } else { + eventHandler.genericChannelException(nioSocketChannel, new CancelledKeyException()); + } + } + } + + + private void handleWrite(NioSocketChannel nioSocketChannel) { + try { + eventHandler.handleWrite(nioSocketChannel); + } catch (Exception e) { + eventHandler.writeException(nioSocketChannel, e); + } + } + + private void handleRead(NioSocketChannel nioSocketChannel) { + try { + eventHandler.handleRead(nioSocketChannel); + } catch (Exception e) { + eventHandler.readException(nioSocketChannel, e); + } + } + + private void handleQueuedWrites() { + WriteOperation writeOperation; + while ((writeOperation = queuedWrites.poll()) != null) { + if (writeOperation.getChannel().isWritable()) { + queueWriteInChannelBuffer(writeOperation); + } else { + writeOperation.getListener().onFailure(new ClosedChannelException()); + } + } + } + + private void setUpNewChannels() { + NioSocketChannel newChannel; + while ((newChannel = this.newChannels.poll()) != null) { + setupChannel(newChannel); + } + } + + private void setupChannel(NioSocketChannel newChannel) { + try { + if (newChannel.register(this)) { + registeredChannels.add(newChannel); + SelectionKey key = newChannel.getSelectionKey(); + key.attach(newChannel); + eventHandler.handleRegistration(newChannel); + attemptConnect(newChannel); + } + } catch (Exception e) { + eventHandler.registrationException(newChannel, e); + } + } + + private void attemptConnect(NioSocketChannel newChannel) { + try { + if (newChannel.finishConnect()) { + eventHandler.handleConnect(newChannel); + } + } catch (Exception e) { + eventHandler.connectException(newChannel, e); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpReadHandler.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpReadHandler.java new file mode 100644 index 00000000000..b41d87a0c09 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpReadHandler.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
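Writes enter a SocketSelector from arbitrary threads as WriteOperation objects (defined below); the selector thread later moves them into the channel's own buffer and toggles write interest. A sketch of the producer side, assuming a connected channel, a listener, and a running workerSelector:

BytesReference message = new BytesArray(new byte[]{1, 2, 3});
WriteOperation write = new WriteOperation(channel, message, listener);

// safe from any thread: either queued (and the selector is woken) or failed
// immediately with ClosedSelectorException if the selector has been closed
workerSelector.queueWrite(write);

// on the selector thread itself the queue can be bypassed:
// workerSelector.queueWriteInChannelBuffer(write);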
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.transport.nio.channel.NioSocketChannel; + +import java.io.IOException; + +public class TcpReadHandler { + + private final NioTransport transport; + + public TcpReadHandler(NioTransport transport) { + this.transport = transport; + } + + public void handleMessage(BytesReference reference, NioSocketChannel channel, String profileName, + int messageBytesLength) { + try { + transport.messageReceived(reference, channel, profileName, channel.getRemoteAddress(), messageBytesLength); + } catch (IOException e) { + handleException(channel, e); + } + } + + public void handleException(NioSocketChannel channel, Exception e) { + transport.exceptionCaught(channel, e); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/WriteOperation.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/WriteOperation.java new file mode 100644 index 00000000000..67ed2447f63 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/WriteOperation.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.transport.nio.channel.NioChannel; +import org.elasticsearch.transport.nio.channel.NioSocketChannel; + +import java.io.IOException; +import java.util.ArrayList; + +public class WriteOperation { + + private final NioSocketChannel channel; + private final ActionListener listener; + private final NetworkBytesReference[] references; + + public WriteOperation(NioSocketChannel channel, BytesReference bytesReference, ActionListener listener) { + this.channel = channel; + this.listener = listener; + this.references = toArray(bytesReference); + } + + public NetworkBytesReference[] getByteReferences() { + return references; + } + + public ActionListener getListener() { + return listener; + } + + public NioSocketChannel getChannel() { + return channel; + } + + public boolean isFullyFlushed() { + return references[references.length - 1].hasReadRemaining() == false; + } + + public int flush() throws IOException { + return channel.write(references); + } + + private static NetworkBytesReference[] toArray(BytesReference reference) { + BytesRefIterator byteRefIterator = reference.iterator(); + BytesRef r; + try { + // Most network messages are composed of three buffers + ArrayList references = new ArrayList<>(3); + while ((r = byteRefIterator.next()) != null) { + references.add(NetworkBytesReference.wrap(new BytesArray(r), r.length, 0)); + } + return references.toArray(new NetworkBytesReference[references.size()]); + + } catch (IOException e) { + // this is really an error since we don't do IO in our bytesreferences + throw new AssertionError("won't happen", e); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/AbstractNioChannel.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/AbstractNioChannel.java new file mode 100644 index 00000000000..be8dbe3f468 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/AbstractNioChannel.java @@ -0,0 +1,205 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.transport.nio.ESSelector; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.NetworkChannel; +import java.nio.channels.SelectableChannel; +import java.nio.channels.SelectionKey; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * This is a basic channel abstraction used by the {@link org.elasticsearch.transport.nio.NioTransport}. + *

+ * A channel is open once it is constructed. The channel remains open and {@link #isOpen()} will return
+ * true until the channel is explicitly closed.
+ * <p>
+ * A channel lifecycle has four stages; a short usage sketch follows the list:
+ * <ol>
+ * <li>UNREGISTERED - When a channel is created and prior to it being registered with a selector.
+ * <li>REGISTERED - When a channel has been registered with a selector. This is the state of a channel that
+ * can perform normal operations.
+ * <li>CLOSING - When a channel has been marked for close, but is not yet closed. {@link #isOpen()} will
+ * still return true. Normal operations should be rejected. The most common scenario for a channel to be
+ * CLOSING is when a channel that was REGISTERED has {@link #closeAsync()} called, but the selector thread
+ * has not yet closed the channel.
+ * <li>CLOSED - The channel has been closed.
+ * </ol>
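+ * <p>
+ * As an illustrative sketch (assuming a {@code ChannelFactory} and a running {@code SocketSelector} are
+ * available), the stages are typically traversed as follows:
+ * <pre>
+ *     NioSocketChannel channel = channelFactory.openNioChannel(remoteAddress); // UNREGISTERED
+ *     selector.registerSocketChannel(channel);   // REGISTERED once the selector thread processes it
+ *     channel.closeAsync();                      // CLOSING until the selector thread runs the close
+ *     channel.getCloseFuture().awaitClose();     // CLOSED
+ * </pre>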
+ * + * @param the type of raw channel this AbstractNioChannel uses + */ +public abstract class AbstractNioChannel implements NioChannel { + + static final int UNREGISTERED = 0; + static final int REGISTERED = 1; + static final int CLOSING = 2; + static final int CLOSED = 3; + + final S socketChannel; + final AtomicInteger state = new AtomicInteger(UNREGISTERED); + + private final InetSocketAddress localAddress; + private final String profile; + private final CloseFuture closeFuture = new CloseFuture(); + private volatile ESSelector selector; + private SelectionKey selectionKey; + + public AbstractNioChannel(String profile, S socketChannel) throws IOException { + this.profile = profile; + this.socketChannel = socketChannel; + this.localAddress = (InetSocketAddress) socketChannel.getLocalAddress(); + } + + @Override + public boolean isOpen() { + return closeFuture.isClosed() == false; + } + + @Override + public InetSocketAddress getLocalAddress() { + return localAddress; + } + + @Override + public String getProfile() { + return profile; + } + + /** + * Schedules a channel to be closed by the selector event loop with which it is registered. + *

+ * <p>
+ * If the current state is UNREGISTERED, the call will attempt to transition the state from UNREGISTERED
+ * to CLOSING. If this transition is successful, the channel can no longer be registered with an event
+ * loop and the channel will be synchronously closed in this method call.
+ * <p>
+ * If the channel is REGISTERED and the state can be transitioned to CLOSING, the close operation will
+ * be scheduled with the event loop.
+ * <p>

+ * If the channel is CLOSING or CLOSED, nothing will be done. + * + * @return future that will be complete when the channel is closed + */ + @Override + public CloseFuture closeAsync() { + if (selector != null && selector.isOnCurrentThread()) { + closeFromSelector(); + return closeFuture; + } + + for (; ; ) { + int state = this.state.get(); + if (state == UNREGISTERED && this.state.compareAndSet(UNREGISTERED, CLOSING)) { + close0(); + break; + } else if (state == REGISTERED && this.state.compareAndSet(REGISTERED, CLOSING)) { + selector.queueChannelClose(this); + break; + } else if (state == CLOSING || state == CLOSED) { + break; + } + } + return closeFuture; + } + + /** + * Closes the channel synchronously. This method should only be called from the selector thread. + *

+ * Once this method returns, the channel will be closed. + */ + @Override + public void closeFromSelector() { + // This will not exit the loop until this thread or someone else has set the state to CLOSED. + // Whichever thread succeeds in setting the state to CLOSED will close the raw channel. + for (; ; ) { + int state = this.state.get(); + if (state < CLOSING && this.state.compareAndSet(state, CLOSING)) { + close0(); + } else if (state == CLOSING) { + close0(); + } else if (state == CLOSED) { + break; + } + } + } + + /** + * This method attempts to registered a channel with a selector. If method returns true the channel was + * successfully registered. If it returns false, the registration failed. The reason a registered might + * fail is if something else closed this channel. + * + * @param selector to register the channel + * @return if the channel was successfully registered + * @throws ClosedChannelException if the raw channel was closed + */ + @Override + public boolean register(ESSelector selector) throws ClosedChannelException { + if (markRegistered(selector)) { + setSelectionKey(socketChannel.register(selector.rawSelector(), 0)); + return true; + } else { + return false; + } + } + + @Override + public ESSelector getSelector() { + return selector; + } + + @Override + public SelectionKey getSelectionKey() { + return selectionKey; + } + + @Override + public CloseFuture getCloseFuture() { + return closeFuture; + } + + @Override + public S getRawChannel() { + return socketChannel; + } + + // Package visibility for testing + void setSelectionKey(SelectionKey selectionKey) { + this.selectionKey = selectionKey; + } + + boolean markRegistered(ESSelector selector) { + this.selector = selector; + return state.compareAndSet(UNREGISTERED, REGISTERED); + } + + private void close0() { + if (this.state.compareAndSet(CLOSING, CLOSED)) { + try { + socketChannel.close(); + closeFuture.channelClosed(this); + } catch (IOException e) { + closeFuture.channelCloseThrewException(this, e); + } + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ChannelFactory.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ChannelFactory.java new file mode 100644 index 00000000000..84c36d41104 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ChannelFactory.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.mocksocket.PrivilegedSocketAccess; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.transport.nio.TcpReadHandler; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; + +public class ChannelFactory { + + private final boolean tcpNoDelay; + private final boolean tcpKeepAlive; + private final boolean tcpReusedAddress; + private final int tcpSendBufferSize; + private final int tcpReceiveBufferSize; + private final TcpReadHandler handler; + + public ChannelFactory(Settings settings, TcpReadHandler handler) { + tcpNoDelay = TcpTransport.TCP_NO_DELAY.get(settings); + tcpKeepAlive = TcpTransport.TCP_KEEP_ALIVE.get(settings); + tcpReusedAddress = TcpTransport.TCP_REUSE_ADDRESS.get(settings); + tcpSendBufferSize = Math.toIntExact(TcpTransport.TCP_SEND_BUFFER_SIZE.get(settings).getBytes()); + tcpReceiveBufferSize = Math.toIntExact(TcpTransport.TCP_RECEIVE_BUFFER_SIZE.get(settings).getBytes()); + this.handler = handler; + } + + public NioSocketChannel openNioChannel(InetSocketAddress remoteAddress) throws IOException { + SocketChannel rawChannel = SocketChannel.open(); + configureSocketChannel(rawChannel); + PrivilegedSocketAccess.connect(rawChannel, remoteAddress); + NioSocketChannel channel = new NioSocketChannel(NioChannel.CLIENT, rawChannel); + channel.setContexts(new TcpReadContext(channel, handler), new TcpWriteContext(channel)); + return channel; + } + + public NioSocketChannel acceptNioChannel(NioServerSocketChannel serverChannel) throws IOException { + ServerSocketChannel serverSocketChannel = serverChannel.getRawChannel(); + SocketChannel rawChannel = PrivilegedSocketAccess.accept(serverSocketChannel); + configureSocketChannel(rawChannel); + NioSocketChannel channel = new NioSocketChannel(serverChannel.getProfile(), rawChannel); + channel.setContexts(new TcpReadContext(channel, handler), new TcpWriteContext(channel)); + return channel; + } + + public NioServerSocketChannel openNioServerSocketChannel(String profileName, InetSocketAddress address) + throws IOException { + ServerSocketChannel socketChannel = ServerSocketChannel.open(); + socketChannel.configureBlocking(false); + ServerSocket socket = socketChannel.socket(); + socket.setReuseAddress(tcpReusedAddress); + socketChannel.bind(address); + return new NioServerSocketChannel(profileName, socketChannel, this); + } + + private void configureSocketChannel(SocketChannel channel) throws IOException { + channel.configureBlocking(false); + Socket socket = channel.socket(); + socket.setTcpNoDelay(tcpNoDelay); + socket.setKeepAlive(tcpKeepAlive); + socket.setReuseAddress(tcpReusedAddress); + if (tcpSendBufferSize > 0) { + socket.setSendBufferSize(tcpSendBufferSize); + } + if (tcpReceiveBufferSize > 0) { + socket.setSendBufferSize(tcpReceiveBufferSize); + } + } + + private static T getSocketChannel(CheckedSupplier supplier) throws IOException { + try { + return AccessController.doPrivileged((PrivilegedExceptionAction) supplier::get); + } catch (PrivilegedActionException e) { + throw (IOException) e.getCause(); + } + } +} diff --git 
a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/CloseFuture.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/CloseFuture.java new file mode 100644 index 00000000000..e41632174ac --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/CloseFuture.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.channel; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.common.util.concurrent.BaseFuture; + +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Consumer; + +public class CloseFuture extends BaseFuture { + + private final SetOnce> listener = new SetOnce<>(); + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + throw new UnsupportedOperationException("Cannot cancel close future"); + } + + public void awaitClose() throws InterruptedException, IOException { + try { + super.get(); + } catch (ExecutionException e) { + throw (IOException) e.getCause(); + } + } + + public void awaitClose(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException, IOException { + try { + super.get(timeout, unit); + } catch (ExecutionException e) { + throw (IOException) e.getCause(); + } + } + + public IOException getCloseException() { + if (isDone()) { + try { + super.get(0, TimeUnit.NANOSECONDS); + return null; + } catch (ExecutionException e) { + // We only make a setter for IOException + return (IOException) e.getCause(); + } catch (TimeoutException e) { + return null; + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } + } else { + return null; + } + } + + public boolean isClosed() { + return super.isDone(); + } + + public void setListener(Consumer listener) { + this.listener.set(listener); + } + + void channelClosed(NioChannel channel) { + boolean set = set(channel); + if (set) { + Consumer listener = this.listener.get(); + if (listener != null) { + listener.accept(channel); + } + } + } + + + void channelCloseThrewException(NioChannel channel, IOException ex) { + boolean set = setException(ex); + if (set) { + Consumer listener = this.listener.get(); + if (listener != null) { + listener.accept(channel); + } + } + } + +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ConnectFuture.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ConnectFuture.java new file mode 100644 index 00000000000..4bc1ca6043c --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ConnectFuture.java @@ -0,0 +1,94 @@ +/* + * Licensed to 
Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.common.util.concurrent.BaseFuture; + +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +public class ConnectFuture extends BaseFuture { + + public boolean awaitConnectionComplete(long timeout, TimeUnit unit) throws InterruptedException { + try { + super.get(timeout, unit); + return true; + } catch (ExecutionException | TimeoutException e) { + return false; + } + } + + public Exception getException() { + if (isDone()) { + try { + // Get should always return without blocking as we already checked 'isDone' + // We are calling 'get' here in order to throw the ExecutionException + super.get(); + return null; + } catch (ExecutionException e) { + // We only make a public setters for IOException or RuntimeException + return (Exception) e.getCause(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } + } else { + return null; + } + } + + public boolean isConnectComplete() { + return getChannel() != null; + } + + public boolean connectFailed() { + return getException() != null; + } + + void setConnectionComplete(NioSocketChannel channel) { + set(channel); + } + + void setConnectionFailed(IOException e) { + setException(e); + } + + void setConnectionFailed(RuntimeException e) { + setException(e); + } + + private NioSocketChannel getChannel() { + if (isDone()) { + try { + // Get should always return without blocking as we already checked 'isDone' + return super.get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } catch (ExecutionException e) { + return null; + } + } else { + return null; + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/NioChannel.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/NioChannel.java new file mode 100644 index 00000000000..281e296391c --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/NioChannel.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.transport.nio.ESSelector; + +import java.net.InetSocketAddress; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.NetworkChannel; +import java.nio.channels.SelectionKey; + +public interface NioChannel { + + String CLIENT = "client-socket"; + + boolean isOpen(); + + InetSocketAddress getLocalAddress(); + + String getProfile(); + + CloseFuture closeAsync(); + + void closeFromSelector(); + + boolean register(ESSelector selector) throws ClosedChannelException; + + ESSelector getSelector(); + + SelectionKey getSelectionKey(); + + CloseFuture getCloseFuture(); + + NetworkChannel getRawChannel(); +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/NioServerSocketChannel.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/NioServerSocketChannel.java new file mode 100644 index 00000000000..bc8d423a45d --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/NioServerSocketChannel.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.channel; + +import java.io.IOException; +import java.nio.channels.ServerSocketChannel; + +public class NioServerSocketChannel extends AbstractNioChannel { + + private final ChannelFactory channelFactory; + + public NioServerSocketChannel(String profile, ServerSocketChannel socketChannel, ChannelFactory channelFactory) throws IOException { + super(profile, socketChannel); + this.channelFactory = channelFactory; + } + + public ChannelFactory getChannelFactory() { + return channelFactory; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/NioSocketChannel.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/NioSocketChannel.java new file mode 100644 index 00000000000..62404403de0 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/NioSocketChannel.java @@ -0,0 +1,189 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.transport.nio.NetworkBytesReference; +import org.elasticsearch.transport.nio.ESSelector; +import org.elasticsearch.transport.nio.SocketSelector; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.SocketChannel; +import java.util.Arrays; + +public class NioSocketChannel extends AbstractNioChannel { + + private final InetSocketAddress remoteAddress; + private final ConnectFuture connectFuture = new ConnectFuture(); + private volatile SocketSelector socketSelector; + private WriteContext writeContext; + private ReadContext readContext; + + public NioSocketChannel(String profile, SocketChannel socketChannel) throws IOException { + super(profile, socketChannel); + this.remoteAddress = (InetSocketAddress) socketChannel.getRemoteAddress(); + } + + @Override + public CloseFuture closeAsync() { + clearQueuedWrites(); + + return super.closeAsync(); + } + + @Override + public void closeFromSelector() { + // Even if the channel has already been closed we will clear any pending write operations just in case + clearQueuedWrites(); + + super.closeFromSelector(); + } + + @Override + public SocketSelector getSelector() { + return socketSelector; + } + + @Override + boolean markRegistered(ESSelector selector) { + this.socketSelector = (SocketSelector) selector; + return super.markRegistered(selector); + } + + public int write(NetworkBytesReference[] references) throws IOException { + int written; + if (references.length == 1) { + written = socketChannel.write(references[0].getReadByteBuffer()); + } else { + ByteBuffer[] buffers = new ByteBuffer[references.length]; + for (int i = 0; i < references.length; ++i) { + buffers[i] = references[i].getReadByteBuffer(); + } + written = (int) socketChannel.write(buffers); + } + if (written <= 0) { + return written; + } + + NetworkBytesReference.vectorizedIncrementReadIndexes(Arrays.asList(references), written); + + return written; + } + + public int read(NetworkBytesReference reference) throws IOException { + int bytesRead = socketChannel.read(reference.getWriteByteBuffer()); + + if (bytesRead == -1) { + return bytesRead; + } + + reference.incrementWrite(bytesRead); + return bytesRead; + } + + public void setContexts(ReadContext readContext, WriteContext writeContext) { + this.readContext = readContext; + this.writeContext = writeContext; + } + + public WriteContext getWriteContext() { + return writeContext; + } + + public ReadContext getReadContext() { + return readContext; + } + + public InetSocketAddress getRemoteAddress() { + return remoteAddress; + } + + public boolean isConnectComplete() { + return connectFuture.isConnectComplete(); + } + + public boolean isWritable() { + return state.get() == REGISTERED; + } + + public boolean isReadable() { + return state.get() == 
REGISTERED; + } + + /** + * This method will attempt to complete the connection process for this channel. It should be called for + * new channels or for a channel that has produced a OP_CONNECT event. If this method returns true then + * the connection is complete and the channel is ready for reads and writes. If it returns false, the + * channel is not yet connected and this method should be called again when a OP_CONNECT event is + * received. + * + * @return true if the connection process is complete + * @throws IOException if an I/O error occurs + */ + public boolean finishConnect() throws IOException { + if (connectFuture.isConnectComplete()) { + return true; + } else if (connectFuture.connectFailed()) { + Exception exception = connectFuture.getException(); + if (exception instanceof IOException) { + throw (IOException) exception; + } else { + throw (RuntimeException) exception; + } + } + + boolean isConnected = socketChannel.isConnected(); + if (isConnected == false) { + isConnected = internalFinish(); + } + if (isConnected) { + connectFuture.setConnectionComplete(this); + } + return isConnected; + } + + public ConnectFuture getConnectFuture() { + return connectFuture; + } + + private boolean internalFinish() throws IOException { + try { + return socketChannel.finishConnect(); + } catch (IOException e) { + connectFuture.setConnectionFailed(e); + throw e; + } catch (RuntimeException e) { + connectFuture.setConnectionFailed(e); + throw e; + } + } + + private void clearQueuedWrites() { + // Even if the channel has already been closed we will clear any pending write operations just in case + if (state.get() > UNREGISTERED) { + SocketSelector selector = getSelector(); + if (selector != null && selector.isOnCurrentThread() && writeContext.hasQueuedWriteOps()) { + writeContext.clearQueuedWriteOps(new ClosedChannelException()); + } + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ReadContext.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ReadContext.java new file mode 100644 index 00000000000..9d2919b1928 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/ReadContext.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio.channel; + +import java.io.IOException; + +public interface ReadContext { + + int read() throws IOException; + +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/SelectionKeyUtils.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/SelectionKeyUtils.java new file mode 100644 index 00000000000..b0cf5552064 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/SelectionKeyUtils.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.channel; + +import java.nio.channels.CancelledKeyException; +import java.nio.channels.SelectionKey; + +public final class SelectionKeyUtils { + + private SelectionKeyUtils() {} + + public static void setWriteInterested(NioChannel channel) throws CancelledKeyException { + SelectionKey selectionKey = channel.getSelectionKey(); + selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_WRITE); + } + + public static void removeWriteInterested(NioChannel channel) throws CancelledKeyException { + SelectionKey selectionKey = channel.getSelectionKey(); + selectionKey.interestOps(selectionKey.interestOps() & ~SelectionKey.OP_WRITE); + } + + public static void setConnectAndReadInterested(NioChannel channel) throws CancelledKeyException { + SelectionKey selectionKey = channel.getSelectionKey(); + selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_CONNECT | SelectionKey.OP_READ); + } + + public static void removeConnectInterested(NioChannel channel) throws CancelledKeyException { + SelectionKey selectionKey = channel.getSelectionKey(); + selectionKey.interestOps(selectionKey.interestOps() & ~SelectionKey.OP_CONNECT); + } + + public static void setAcceptInterested(NioServerSocketChannel channel) { + SelectionKey selectionKey = channel.getSelectionKey(); + selectionKey.interestOps(selectionKey.interestOps() | SelectionKey.OP_ACCEPT); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/TcpFrameDecoder.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/TcpFrameDecoder.java new file mode 100644 index 00000000000..356af44c5ba --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/TcpFrameDecoder.java @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.transport.TcpHeader; +import org.elasticsearch.transport.TcpTransport; + +import java.io.IOException; +import java.io.StreamCorruptedException; + +public class TcpFrameDecoder { + + private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9); + private static final int HEADER_SIZE = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; + + private int expectedMessageLength = -1; + + public BytesReference decode(BytesReference bytesReference, int currentBufferSize) throws IOException { + if (currentBufferSize >= 6) { + int messageLength = readHeaderBuffer(bytesReference); + int totalLength = messageLength + HEADER_SIZE; + if (totalLength > currentBufferSize) { + expectedMessageLength = totalLength; + return null; + } else if (totalLength == bytesReference.length()) { + expectedMessageLength = -1; + return bytesReference; + } else { + expectedMessageLength = -1; + return bytesReference.slice(0, totalLength); + } + } else { + return null; + } + } + + public int expectedMessageLength() { + return expectedMessageLength; + } + + private int readHeaderBuffer(BytesReference headerBuffer) throws IOException { + if (headerBuffer.get(0) != 'E' || headerBuffer.get(1) != 'S') { + if (appearsToBeHTTP(headerBuffer)) { + throw new TcpTransport.HttpOnTransportException("This is not a HTTP port"); + } + + throw new StreamCorruptedException("invalid internal transport message format, got (" + + Integer.toHexString(headerBuffer.get(0) & 0xFF) + "," + + Integer.toHexString(headerBuffer.get(1) & 0xFF) + "," + + Integer.toHexString(headerBuffer.get(2) & 0xFF) + "," + + Integer.toHexString(headerBuffer.get(3) & 0xFF) + ")"); + } + final int messageLength; + try (StreamInput input = headerBuffer.streamInput()) { + input.skip(TcpHeader.MARKER_BYTES_SIZE); + messageLength = input.readInt(); + } + + if (messageLength == -1) { + // This is a ping + return 0; + } + + if (messageLength <= 0) { + throw new StreamCorruptedException("invalid data length: " + messageLength); + } + + if (messageLength > NINETY_PER_HEAP_SIZE) { + throw new IllegalArgumentException("transport content length received [" + new ByteSizeValue(messageLength) + "] exceeded [" + + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); + } + + return messageLength; + } + + private static boolean appearsToBeHTTP(BytesReference headerBuffer) { + return bufferStartsWith(headerBuffer, "GET") || + bufferStartsWith(headerBuffer, "POST") || + bufferStartsWith(headerBuffer, "PUT") || + bufferStartsWith(headerBuffer, "HEAD") || + bufferStartsWith(headerBuffer, "DELETE") || + // TODO: Actually 'OPTIONS'. 
But that does not currently fit in 6 bytes + bufferStartsWith(headerBuffer, "OPTION") || + bufferStartsWith(headerBuffer, "PATCH") || + bufferStartsWith(headerBuffer, "TRACE"); + } + + private static boolean bufferStartsWith(BytesReference buffer, String method) { + char[] chars = method.toCharArray(); + for (int i = 0; i < chars.length; i++) { + if (buffer.get(i) != chars[i]) { + return false; + } + } + return true; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/TcpReadContext.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/TcpReadContext.java new file mode 100644 index 00000000000..c332adbd314 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/TcpReadContext.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.CompositeBytesReference; +import org.elasticsearch.transport.nio.NetworkBytesReference; +import org.elasticsearch.transport.nio.TcpReadHandler; + +import java.io.IOException; +import java.util.Iterator; +import java.util.LinkedList; + +public class TcpReadContext implements ReadContext { + + private static final int DEFAULT_READ_LENGTH = 1 << 14; + + private final TcpReadHandler handler; + private final NioSocketChannel channel; + private final TcpFrameDecoder frameDecoder; + private final LinkedList references = new LinkedList<>(); + private int rawBytesCount = 0; + + public TcpReadContext(NioSocketChannel channel, TcpReadHandler handler) { + this(channel, handler, new TcpFrameDecoder()); + } + + public TcpReadContext(NioSocketChannel channel, TcpReadHandler handler, TcpFrameDecoder frameDecoder) { + this.handler = handler; + this.channel = channel; + this.frameDecoder = frameDecoder; + this.references.add(NetworkBytesReference.wrap(new BytesArray(new byte[DEFAULT_READ_LENGTH]))); + } + + @Override + public int read() throws IOException { + NetworkBytesReference last = references.peekLast(); + if (last == null || last.hasWriteRemaining() == false) { + this.references.add(NetworkBytesReference.wrap(new BytesArray(new byte[DEFAULT_READ_LENGTH]))); + } + + int bytesRead = channel.read(references.getLast()); + + if (bytesRead == -1) { + return bytesRead; + } + + rawBytesCount += bytesRead; + + BytesReference message; + + while ((message = frameDecoder.decode(createCompositeBuffer(), rawBytesCount)) != null) { + int messageLengthWithHeader = message.length(); + NetworkBytesReference.vectorizedIncrementReadIndexes(references, messageLengthWithHeader); + trimDecodedMessages(messageLengthWithHeader); + rawBytesCount -= 
messageLengthWithHeader; + + try { + BytesReference messageWithoutHeader = message.slice(6, message.length() - 6); + handler.handleMessage(messageWithoutHeader, channel, channel.getProfile(), messageWithoutHeader.length()); + } catch (Exception e) { + handler.handleException(channel, e); + } + } + + return bytesRead; + } + + private CompositeBytesReference createCompositeBuffer() { + return new CompositeBytesReference(references.toArray(new BytesReference[references.size()])); + } + + private void trimDecodedMessages(int bytesToTrim) { + while (bytesToTrim != 0) { + NetworkBytesReference ref = references.getFirst(); + int readIndex = ref.getReadIndex(); + bytesToTrim -= readIndex; + if (readIndex == ref.length()) { + references.removeFirst(); + } else { + assert bytesToTrim == 0; + if (readIndex != 0) { + references.removeFirst(); + NetworkBytesReference slicedRef = ref.slice(readIndex, ref.length() - readIndex); + references.addFirst(slicedRef); + } + } + + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/TcpWriteContext.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/TcpWriteContext.java new file mode 100644 index 00000000000..a332ea89a33 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/TcpWriteContext.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.transport.nio.SocketSelector; +import org.elasticsearch.transport.nio.WriteOperation; + +import java.io.IOException; +import java.nio.channels.ClosedChannelException; +import java.util.LinkedList; + +public class TcpWriteContext implements WriteContext { + + private final NioSocketChannel channel; + private final LinkedList queued = new LinkedList<>(); + + public TcpWriteContext(NioSocketChannel channel) { + this.channel = channel; + } + + @Override + public void sendMessage(BytesReference reference, ActionListener listener) { + if (channel.isWritable() == false) { + listener.onFailure(new ClosedChannelException()); + return; + } + + WriteOperation writeOperation = new WriteOperation(channel, reference, listener); + SocketSelector selector = channel.getSelector(); + if (selector.isOnCurrentThread() == false) { + selector.queueWrite(writeOperation); + return; + } + + // TODO: Eval if we will allow writes from sendMessage + selector.queueWriteInChannelBuffer(writeOperation); + } + + @Override + public void queueWriteOperations(WriteOperation writeOperation) { + assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to queue writes"; + queued.add(writeOperation); + } + + @Override + public void flushChannel() throws IOException { + assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to flush writes"; + int ops = queued.size(); + if (ops == 1) { + singleFlush(queued.pop()); + } else if (ops > 1) { + multiFlush(); + } + } + + @Override + public boolean hasQueuedWriteOps() { + assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to access queued writes"; + return queued.isEmpty() == false; + } + + @Override + public void clearQueuedWriteOps(Exception e) { + assert channel.getSelector().isOnCurrentThread() : "Must be on selector thread to clear queued writes"; + for (WriteOperation op : queued) { + op.getListener().onFailure(e); + } + queued.clear(); + } + + private void singleFlush(WriteOperation headOp) throws IOException { + headOp.flush(); + + if (headOp.isFullyFlushed()) { + headOp.getListener().onResponse(channel); + } else { + queued.push(headOp); + } + } + + private void multiFlush() throws IOException { + boolean lastOpCompleted = true; + while (lastOpCompleted && queued.isEmpty() == false) { + WriteOperation op = queued.pop(); + singleFlush(op); + lastOpCompleted = op.isFullyFlushed(); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/WriteContext.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/WriteContext.java new file mode 100644 index 00000000000..1a14d279dd2 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/channel/WriteContext.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.transport.nio.WriteOperation; + +import java.io.IOException; + +public interface WriteContext { + + void sendMessage(BytesReference reference, ActionListener listener); + + void queueWriteOperations(WriteOperation writeOperation); + + void flushChannel() throws IOException; + + boolean hasQueuedWriteOps(); + + void clearQueuedWriteOps(Exception e); + +} diff --git a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json new file mode 100644 index 00000000000..38937a9b5af --- /dev/null +++ b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json @@ -0,0 +1,54 @@ +{ + "index":{ + "analysis":{ + "tokenizer":{ + "standard":{ + "type":"standard" + } + }, + "filter":{ + "stop":{ + "type":"stop", + "stopwords":["test-stop"] + }, + "stop2":{ + "type":"stop", + "stopwords":["stop2-1", "stop2-2"] + }, + "my":{ + "type":"myfilter" + }, + "dict_dec":{ + "type":"dictionary_decompounder", + "word_list":["donau", "dampf", "schiff", "spargel", "creme", "suppe"] + } + }, + "analyzer":{ + "standard":{ + "type":"standard", + "stopwords":["test1", "test2", "test3"] + }, + "custom1":{ + "tokenizer":"standard", + "filter":["stop", "stop2"] + }, + "custom4":{ + "tokenizer":"standard", + "filter":["my"] + }, + "custom6":{ + "tokenizer":"standard", + "position_increment_gap": 256 + }, + "czechAnalyzerWithStemmer":{ + "tokenizer":"standard", + "filter":["standard", "lowercase", "stop", "czech_stem"] + }, + "decompoundingAnalyzer":{ + "tokenizer":"standard", + "filter":["dict_dec"] + } + } + } + } +} diff --git a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml new file mode 100644 index 00000000000..f7a57d14dbe --- /dev/null +++ b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml @@ -0,0 +1,39 @@ +index : + analysis : + tokenizer : + standard : + type : standard + filter : + stop : + type : stop + stopwords : [test-stop] + stop2 : + type : stop + stopwords : [stop2-1, stop2-2] + my : + type : myfilter + dict_dec : + type : dictionary_decompounder + word_list : [donau, dampf, schiff, spargel, creme, suppe] + analyzer : + standard : + type : standard + stopwords : [test1, test2, test3] + custom1 : + tokenizer : standard + filter : [stop, stop2] + custom4 : + tokenizer : standard + filter : [my] + custom6 : + tokenizer : standard + position_increment_gap: 256 + custom7 : + type : standard + version: 3.6 + czechAnalyzerWithStemmer : + tokenizer : standard + filter : [standard, lowercase, stop, czech_stem] + decompoundingAnalyzer : + tokenizer : standard + filter : [dict_dec] diff --git a/test/framework/src/test/java/org/elasticsearch/test/XContentTestUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/XContentTestUtilsTests.java index 
38970645505..f3b44f25104 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/XContentTestUtilsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/XContentTestUtilsTests.java @@ -61,7 +61,7 @@ public class XContentTestUtilsTests extends ESTestCase { builder.startObject("inner1"); { builder.field("inner1field1", "value"); - builder.startObject("inner2"); + builder.startObject("inn.er2"); { builder.field("inner2field1", "value"); } @@ -79,7 +79,7 @@ public class XContentTestUtilsTests extends ESTestCase { assertThat(insertPaths, hasItem(equalTo("list1.2"))); assertThat(insertPaths, hasItem(equalTo("list1.4"))); assertThat(insertPaths, hasItem(equalTo("inner1"))); - assertThat(insertPaths, hasItem(equalTo("inner1.inner2"))); + assertThat(insertPaths, hasItem(equalTo("inner1.inn\\.er2"))); } } @@ -89,19 +89,19 @@ public class XContentTestUtilsTests extends ESTestCase { builder.startObject(); builder.endObject(); builder = XContentTestUtils.insertIntoXContent(XContentType.JSON.xContent(), builder.bytes(), Collections.singletonList(""), - () -> "inner1", () -> new HashMap<>()); + () -> "inn.er1", () -> new HashMap<>()); builder = XContentTestUtils.insertIntoXContent(XContentType.JSON.xContent(), builder.bytes(), Collections.singletonList(""), () -> "field1", () -> "value1"); - builder = XContentTestUtils.insertIntoXContent(XContentType.JSON.xContent(), builder.bytes(), Collections.singletonList("inner1"), - () -> "inner2", () -> new HashMap<>()); - builder = XContentTestUtils.insertIntoXContent(XContentType.JSON.xContent(), builder.bytes(), Collections.singletonList("inner1"), - () -> "field2", () -> "value2"); + builder = XContentTestUtils.insertIntoXContent(XContentType.JSON.xContent(), builder.bytes(), + Collections.singletonList("inn\\.er1"), () -> "inner2", () -> new HashMap<>()); + builder = XContentTestUtils.insertIntoXContent(XContentType.JSON.xContent(), builder.bytes(), + Collections.singletonList("inn\\.er1"), () -> "field2", () -> "value2"); try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, builder.bytes(), builder.contentType())) { Map map = parser.map(); assertEquals(2, map.size()); assertEquals("value1", map.get("field1")); - assertThat(map.get("inner1"), instanceOf(Map.class)); - Map innerMap = (Map) map.get("inner1"); + assertThat(map.get("inn.er1"), instanceOf(Map.class)); + Map innerMap = (Map) map.get("inn.er1"); assertEquals(2, innerMap.size()); assertEquals("value2", innerMap.get("field2")); assertThat(innerMap.get("inner2"), instanceOf(Map.class)); diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java index bf4c786c110..ff5c193dc0d 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/ESTestCaseTests.java @@ -56,7 +56,8 @@ public class ESTestCaseTests extends ESTestCase { }); fail("expected assertion error"); } catch (AssertionFailedError assertFailed) { - assertEquals("Unexpected exception type, expected IllegalArgumentException", assertFailed.getMessage()); + assertEquals("Unexpected exception type, expected IllegalArgumentException but got java.lang.IllegalStateException: bad state", + assertFailed.getMessage()); assertNotNull(assertFailed.getCause()); assertEquals("bad state", assertFailed.getCause().getMessage()); } @@ -66,7 +67,8 @@ public class ESTestCaseTests extends ESTestCase { 
fail("expected assertion error"); } catch (AssertionFailedError assertFailed) { assertNull(assertFailed.getCause()); - assertEquals("Expected exception IllegalArgumentException", assertFailed.getMessage()); + assertEquals("Expected exception IllegalArgumentException but no exception was thrown", + assertFailed.getMessage()); } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index 0284a594883..fdfce1a14e9 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -193,6 +193,11 @@ public class InternalTestClusterTests extends ESTestCase { return settings.build(); } + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + @Override public Settings transportClientSettings() { return Settings.builder() @@ -258,6 +263,12 @@ public class InternalTestClusterTests extends ESTestCase { .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) .build(); } + + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + @Override public Settings transportClientSettings() { return Settings.builder() @@ -350,10 +361,10 @@ public class InternalTestClusterTests extends ESTestCase { final Path baseDir = createTempDir(); final int numNodes = 5; InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, false, - false, 0, 0, "test", new NodeConfigurationSource() { - @Override - public Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() + false, 0, 0, "test", new NodeConfigurationSource() { + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numNodes) .put(NetworkModule.HTTP_ENABLED.getKey(), false) .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME) @@ -362,14 +373,19 @@ public class InternalTestClusterTests extends ESTestCase { // elections more likely .put(ZenDiscovery.JOIN_TIMEOUT_SETTING.getKey(), "3s") .build(); - } + } - @Override - public Settings transportClientSettings() { - return Settings.builder() + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + + @Override + public Settings transportClientSettings() { + return Settings.builder() .put(NetworkModule.TRANSPORT_TYPE_KEY, MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME).build(); - } - }, 0, randomBoolean(), "", Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class), Function.identity()); + } + }, 0, randomBoolean(), "", Arrays.asList(MockTcpTransportPlugin.class, TestZenDiscovery.TestPlugin.class), Function.identity()); cluster.beforeTest(random(), 0.0); List roles = new ArrayList<>(); for (int i = 0; i < numNodes; i++) { @@ -440,6 +456,11 @@ public class InternalTestClusterTests extends ESTestCase { .build(); } + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + @Override public Settings transportClientSettings() { return Settings.builder() diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/AcceptingSelectorTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/AcceptingSelectorTests.java new file mode 100644 index 00000000000..e3cf9b0a7e9 --- /dev/null +++ 
b/test/framework/src/test/java/org/elasticsearch/transport/nio/AcceptingSelectorTests.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.nio.channel.NioChannel; +import org.elasticsearch.transport.nio.channel.NioServerSocketChannel; +import org.elasticsearch.transport.nio.utils.TestSelectionKey; +import org.junit.Before; + +import java.io.IOException; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.security.PrivilegedActionException; +import java.util.HashSet; +import java.util.Set; + +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class AcceptingSelectorTests extends ESTestCase { + + private AcceptingSelector selector; + private NioServerSocketChannel serverChannel; + private AcceptorEventHandler eventHandler; + private TestSelectionKey selectionKey; + private HashSet keySet = new HashSet<>(); + + @Before + public void setUp() throws Exception { + super.setUp(); + + eventHandler = mock(AcceptorEventHandler.class); + serverChannel = mock(NioServerSocketChannel.class); + + Selector rawSelector = mock(Selector.class); + selector = new AcceptingSelector(eventHandler, rawSelector); + this.selector.setThread(); + + selectionKey = new TestSelectionKey(0); + selectionKey.attach(serverChannel); + when(serverChannel.getSelectionKey()).thenReturn(selectionKey); + when(rawSelector.selectedKeys()).thenReturn(keySet); + when(rawSelector.select(0)).thenReturn(1); + } + + public void testRegisteredChannel() throws IOException, PrivilegedActionException { + selector.registerServerChannel(serverChannel); + + when(serverChannel.register(selector)).thenReturn(true); + + selector.doSelect(0); + + verify(eventHandler).serverChannelRegistered(serverChannel); + Set registeredChannels = selector.getRegisteredChannels(); + assertEquals(1, registeredChannels.size()); + assertTrue(registeredChannels.contains(serverChannel)); + } + + public void testAcceptEvent() throws IOException { + selectionKey.setReadyOps(SelectionKey.OP_ACCEPT); + keySet.add(selectionKey); + + selector.doSelect(0); + + verify(eventHandler).acceptChannel(serverChannel); + } + + public void testAcceptException() throws IOException { + selectionKey.setReadyOps(SelectionKey.OP_ACCEPT); + keySet.add(selectionKey); + IOException ioException = new IOException(); + + doThrow(ioException).when(eventHandler).acceptChannel(serverChannel); + + selector.doSelect(0); + + verify(eventHandler).acceptException(serverChannel, ioException); + } + + public void testCleanup() throws IOException { + 
selector.registerServerChannel(serverChannel); + + when(serverChannel.register(selector)).thenReturn(true); + + selector.doSelect(0); + + assertEquals(1, selector.getRegisteredChannels().size()); + + selector.cleanup(); + + verify(eventHandler).handleClose(serverChannel); + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/AcceptorEventHandlerTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/AcceptorEventHandlerTests.java new file mode 100644 index 00000000000..fc6829d5948 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/AcceptorEventHandlerTests.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.nio.channel.ChannelFactory; +import org.elasticsearch.transport.nio.channel.DoNotRegisterServerChannel; +import org.elasticsearch.transport.nio.channel.NioServerSocketChannel; +import org.elasticsearch.transport.nio.channel.NioSocketChannel; +import org.junit.Before; + +import java.io.IOException; +import java.nio.channels.SelectionKey; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class AcceptorEventHandlerTests extends ESTestCase { + + private AcceptorEventHandler handler; + private SocketSelector socketSelector; + private ChannelFactory channelFactory; + private OpenChannels openChannels; + private NioServerSocketChannel channel; + + @Before + public void setUpHandler() throws IOException { + channelFactory = mock(ChannelFactory.class); + socketSelector = mock(SocketSelector.class); + openChannels = new OpenChannels(logger); + ArrayList selectors = new ArrayList<>(); + selectors.add(socketSelector); + handler = new AcceptorEventHandler(logger, openChannels, new RoundRobinSelectorSupplier(selectors)); + + channel = new DoNotRegisterServerChannel("", mock(ServerSocketChannel.class), channelFactory); + channel.register(mock(ESSelector.class)); + } + + public void testHandleRegisterAdjustsOpenChannels() { + assertEquals(0, openChannels.serverChannelsCount()); + + handler.serverChannelRegistered(channel); + + assertEquals(1, openChannels.serverChannelsCount()); + } + + public void testHandleRegisterSetsOP_ACCEPTInterest() { + assertEquals(0, channel.getSelectionKey().interestOps()); + + handler.serverChannelRegistered(channel); + + assertEquals(SelectionKey.OP_ACCEPT, channel.getSelectionKey().interestOps()); + } + + public void testHandleAcceptRegistersWithSelector() throws 
IOException { + NioSocketChannel childChannel = new NioSocketChannel("", mock(SocketChannel.class)); + when(channelFactory.acceptNioChannel(channel)).thenReturn(childChannel); + + handler.acceptChannel(channel); + + verify(socketSelector).registerSocketChannel(childChannel); + } + + public void testHandleAcceptAddsToOpenChannelsAndAddsCloseListenerToRemove() throws IOException { + NioSocketChannel childChannel = new NioSocketChannel("", SocketChannel.open()); + when(channelFactory.acceptNioChannel(channel)).thenReturn(childChannel); + + handler.acceptChannel(channel); + + assertEquals(new HashSet<>(Arrays.asList(childChannel)), openChannels.getAcceptedChannels()); + + childChannel.closeAsync(); + + assertEquals(new HashSet<>(), openChannels.getAcceptedChannels()); + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/ByteBufferReferenceTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/ByteBufferReferenceTests.java new file mode 100644 index 00000000000..335e3d2f778 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/ByteBufferReferenceTests.java @@ -0,0 +1,155 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.test.ESTestCase; + +import java.nio.ByteBuffer; + +public class ByteBufferReferenceTests extends ESTestCase { + + private NetworkBytesReference buffer; + + public void testBasicGetByte() { + byte[] bytes = new byte[10]; + initializeBytes(bytes); + buffer = NetworkBytesReference.wrap(new BytesArray(bytes)); + + assertEquals(10, buffer.length()); + for (int i = 0 ; i < bytes.length; ++i) { + assertEquals(i, buffer.get(i)); + } + } + + public void testBasicGetByteWithOffset() { + byte[] bytes = new byte[10]; + initializeBytes(bytes); + buffer = NetworkBytesReference.wrap(new BytesArray(bytes, 2, 8)); + + assertEquals(8, buffer.length()); + for (int i = 2 ; i < bytes.length; ++i) { + assertEquals(i, buffer.get(i - 2)); + } + } + + public void testBasicGetByteWithOffsetAndLimit() { + byte[] bytes = new byte[10]; + initializeBytes(bytes); + buffer = NetworkBytesReference.wrap(new BytesArray(bytes, 2, 6)); + + assertEquals(6, buffer.length()); + for (int i = 2 ; i < bytes.length - 2; ++i) { + assertEquals(i, buffer.get(i - 2)); + } + } + + public void testGetWriteBufferRespectsWriteIndex() { + byte[] bytes = new byte[10]; + + buffer = NetworkBytesReference.wrap(new BytesArray(bytes, 2, 8)); + + ByteBuffer writeByteBuffer = buffer.getWriteByteBuffer(); + + assertEquals(2, writeByteBuffer.position()); + assertEquals(10, writeByteBuffer.limit()); + + buffer.incrementWrite(2); + + writeByteBuffer = buffer.getWriteByteBuffer(); + assertEquals(4, writeByteBuffer.position()); + assertEquals(10, writeByteBuffer.limit()); + } + + public void testGetReadBufferRespectsReadIndex() { + byte[] bytes = new byte[10]; + + buffer = NetworkBytesReference.wrap(new BytesArray(bytes, 3, 6), 6, 0); + + ByteBuffer readByteBuffer = buffer.getReadByteBuffer(); + + assertEquals(3, readByteBuffer.position()); + assertEquals(9, readByteBuffer.limit()); + + buffer.incrementRead(2); + + readByteBuffer = buffer.getReadByteBuffer(); + assertEquals(5, readByteBuffer.position()); + assertEquals(9, readByteBuffer.limit()); + } + + public void testWriteAndReadRemaining() { + byte[] bytes = new byte[10]; + + buffer = NetworkBytesReference.wrap(new BytesArray(bytes, 2, 8)); + + assertEquals(0, buffer.getReadRemaining()); + assertEquals(8, buffer.getWriteRemaining()); + + buffer.incrementWrite(3); + buffer.incrementRead(2); + + assertEquals(1, buffer.getReadRemaining()); + assertEquals(5, buffer.getWriteRemaining()); + } + + public void testBasicSlice() { + byte[] bytes = new byte[20]; + initializeBytes(bytes); + + buffer = NetworkBytesReference.wrap(new BytesArray(bytes, 2, 18)); + + NetworkBytesReference slice = buffer.slice(4, 14); + + assertEquals(14, slice.length()); + assertEquals(0, slice.getReadIndex()); + assertEquals(0, slice.getWriteIndex()); + + for (int i = 6; i < 20; ++i) { + assertEquals(i, slice.get(i - 6)); + } + } + + public void testSliceWithReadAndWriteIndexes() { + byte[] bytes = new byte[20]; + initializeBytes(bytes); + + buffer = NetworkBytesReference.wrap(new BytesArray(bytes, 2, 18)); + + buffer.incrementWrite(9); + buffer.incrementRead(5); + + NetworkBytesReference slice = buffer.slice(6, 12); + + assertEquals(12, slice.length()); + assertEquals(0, slice.getReadIndex()); + assertEquals(3, slice.getWriteIndex()); + + for (int i = 8; i < 20; ++i) { + assertEquals(i, slice.get(i - 8)); + } + } + + private void initializeBytes(byte[] bytes) { + for (int i = 0 ; i < bytes.length; ++i) { + 
bytes[i] = (byte) i; + } + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/ESSelectorTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/ESSelectorTests.java new file mode 100644 index 00000000000..e57b1bc4efd --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/ESSelectorTests.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.nio.channel.NioChannel; +import org.junit.Before; + +import java.io.IOException; +import java.nio.channels.ClosedSelectorException; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class ESSelectorTests extends ESTestCase { + + private ESSelector selector; + private EventHandler handler; + + @Before + public void setUp() throws Exception { + super.setUp(); + handler = mock(EventHandler.class); + selector = new TestSelector(handler); + } + + public void testQueueChannelForClosed() throws IOException { + NioChannel channel = mock(NioChannel.class); + selector.registeredChannels.add(channel); + + selector.queueChannelClose(channel); + + assertEquals(1, selector.getRegisteredChannels().size()); + + selector.singleLoop(); + + verify(handler).handleClose(channel); + + assertEquals(0, selector.getRegisteredChannels().size()); + } + + public void testSelectorClosedExceptionIsNotCaughtWhileRunning() throws IOException { + ((TestSelector) this.selector).setClosedSelectorException(new ClosedSelectorException()); + + boolean closedSelectorExceptionCaught = false; + try { + this.selector.singleLoop(); + } catch (ClosedSelectorException e) { + closedSelectorExceptionCaught = true; + } + + assertTrue(closedSelectorExceptionCaught); + } + + public void testIOExceptionWhileSelect() throws IOException { + IOException ioException = new IOException(); + ((TestSelector) this.selector).setIOException(ioException); + + this.selector.singleLoop(); + + verify(handler).selectException(ioException); + } + + private static class TestSelector extends ESSelector { + + private ClosedSelectorException closedSelectorException; + private IOException ioException; + + protected TestSelector(EventHandler eventHandler) throws IOException { + super(eventHandler); + } + + @Override + void doSelect(int timeout) throws IOException, ClosedSelectorException { + if (closedSelectorException != null) { + throw closedSelectorException; + } + if (ioException != null) { + throw ioException; + } + } + + @Override + void cleanup() { + + } + + public void setClosedSelectorException(ClosedSelectorException exception) { + this.closedSelectorException = exception; + } + + public void setIOException(IOException 
ioException) { + this.ioException = ioException; + } + } + +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/NioClientTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/NioClientTests.java new file mode 100644 index 00000000000..e9f6dfe7f71 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/NioClientTests.java @@ -0,0 +1,193 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.nio.channel.ChannelFactory; +import org.elasticsearch.transport.nio.channel.CloseFuture; +import org.elasticsearch.transport.nio.channel.ConnectFuture; +import org.elasticsearch.transport.nio.channel.NioChannel; +import org.elasticsearch.transport.nio.channel.NioSocketChannel; +import org.junit.Before; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class NioClientTests extends ESTestCase { + + private NioClient client; + private SocketSelector selector; + private ChannelFactory channelFactory; + private OpenChannels openChannels = new OpenChannels(logger); + private NioSocketChannel[] channels; + private DiscoveryNode node; + private Consumer listener; + private TransportAddress address; + + @Before + @SuppressWarnings("unchecked") + public void setUpClient() { + channelFactory = mock(ChannelFactory.class); + selector = mock(SocketSelector.class); + listener = mock(Consumer.class); + + ArrayList selectors = new ArrayList<>(); + selectors.add(selector); + Supplier selectorSupplier = new RoundRobinSelectorSupplier(selectors); + client = new NioClient(logger, openChannels, selectorSupplier, TimeValue.timeValueMillis(5), channelFactory); + + channels = new NioSocketChannel[2]; + address = new TransportAddress(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0)); + node = new DiscoveryNode("node-id", address, Version.CURRENT); + } + + public void testCreateConnections() throws IOException, InterruptedException { + NioSocketChannel channel1 = mock(NioSocketChannel.class); + ConnectFuture connectFuture1 = mock(ConnectFuture.class); + CloseFuture closeFuture1 = mock(CloseFuture.class); + 
NioSocketChannel channel2 = mock(NioSocketChannel.class); + ConnectFuture connectFuture2 = mock(ConnectFuture.class); + CloseFuture closeFuture2 = mock(CloseFuture.class); + + when(channelFactory.openNioChannel(address.address())).thenReturn(channel1, channel2); + when(channel1.getCloseFuture()).thenReturn(closeFuture1); + when(channel1.getConnectFuture()).thenReturn(connectFuture1); + when(channel2.getCloseFuture()).thenReturn(closeFuture2); + when(channel2.getConnectFuture()).thenReturn(connectFuture2); + when(connectFuture1.awaitConnectionComplete(5, TimeUnit.MILLISECONDS)).thenReturn(true); + when(connectFuture2.awaitConnectionComplete(5, TimeUnit.MILLISECONDS)).thenReturn(true); + + client.connectToChannels(node, channels, TimeValue.timeValueMillis(5), listener); + + verify(closeFuture1).setListener(listener); + verify(closeFuture2).setListener(listener); + verify(selector).registerSocketChannel(channel1); + verify(selector).registerSocketChannel(channel2); + + assertEquals(channel1, channels[0]); + assertEquals(channel2, channels[1]); + } + + public void testWithADifferentConnectTimeout() throws IOException, InterruptedException { + NioSocketChannel channel1 = mock(NioSocketChannel.class); + ConnectFuture connectFuture1 = mock(ConnectFuture.class); + CloseFuture closeFuture1 = mock(CloseFuture.class); + + when(channelFactory.openNioChannel(address.address())).thenReturn(channel1); + when(channel1.getCloseFuture()).thenReturn(closeFuture1); + when(channel1.getConnectFuture()).thenReturn(connectFuture1); + when(connectFuture1.awaitConnectionComplete(3, TimeUnit.MILLISECONDS)).thenReturn(true); + + channels = new NioSocketChannel[1]; + client.connectToChannels(node, channels, TimeValue.timeValueMillis(3), listener); + + verify(closeFuture1).setListener(listener); + verify(selector).registerSocketChannel(channel1); + + assertEquals(channel1, channels[0]); + } + + public void testConnectionTimeout() throws IOException, InterruptedException { + NioSocketChannel channel1 = mock(NioSocketChannel.class); + ConnectFuture connectFuture1 = mock(ConnectFuture.class); + CloseFuture closeFuture1 = mock(CloseFuture.class); + NioSocketChannel channel2 = mock(NioSocketChannel.class); + ConnectFuture connectFuture2 = mock(ConnectFuture.class); + CloseFuture closeFuture2 = mock(CloseFuture.class); + + when(channelFactory.openNioChannel(address.address())).thenReturn(channel1, channel2); + when(channel1.getCloseFuture()).thenReturn(closeFuture1); + when(channel1.getConnectFuture()).thenReturn(connectFuture1); + when(channel2.getCloseFuture()).thenReturn(closeFuture2); + when(channel2.getConnectFuture()).thenReturn(connectFuture2); + when(connectFuture1.awaitConnectionComplete(5, TimeUnit.MILLISECONDS)).thenReturn(true); + when(connectFuture2.awaitConnectionComplete(5, TimeUnit.MILLISECONDS)).thenReturn(false); + + try { + client.connectToChannels(node, channels, TimeValue.timeValueMillis(5), listener); + fail("Should have thrown ConnectTransportException"); + } catch (ConnectTransportException e) { + assertTrue(e.getMessage().contains("connect_timeout[5ms]")); + } + + verify(channel1).closeAsync(); + verify(channel2).closeAsync(); + + assertNull(channels[0]); + assertNull(channels[1]); + } + + public void testConnectionException() throws IOException, InterruptedException { + NioSocketChannel channel1 = mock(NioSocketChannel.class); + ConnectFuture connectFuture1 = mock(ConnectFuture.class); + CloseFuture closeFuture1 = mock(CloseFuture.class); + NioSocketChannel channel2 = mock(NioSocketChannel.class); + 
ConnectFuture connectFuture2 = mock(ConnectFuture.class); + CloseFuture closeFuture2 = mock(CloseFuture.class); + IOException ioException = new IOException(); + + when(channelFactory.openNioChannel(address.address())).thenReturn(channel1, channel2); + when(channel1.getCloseFuture()).thenReturn(closeFuture1); + when(channel1.getConnectFuture()).thenReturn(connectFuture1); + when(channel2.getCloseFuture()).thenReturn(closeFuture2); + when(channel2.getConnectFuture()).thenReturn(connectFuture2); + when(connectFuture1.awaitConnectionComplete(5, TimeUnit.MILLISECONDS)).thenReturn(true); + when(connectFuture2.awaitConnectionComplete(5, TimeUnit.MILLISECONDS)).thenReturn(false); + when(connectFuture2.getException()).thenReturn(ioException); + + try { + client.connectToChannels(node, channels, TimeValue.timeValueMillis(5), listener); + fail("Should have thrown ConnectTransportException"); + } catch (ConnectTransportException e) { + assertTrue(e.getMessage().contains("connect_exception")); + assertSame(ioException, e.getCause()); + } + + verify(channel1).closeAsync(); + verify(channel2).closeAsync(); + + assertNull(channels[0]); + assertNull(channels[1]); + } + + public void testCloseDoesNotAllowConnections() throws IOException { + client.close(); + + assertFalse(client.connectToChannels(node, channels, TimeValue.timeValueMillis(5), listener)); + + for (NioSocketChannel channel : channels) { + assertNull(channel); + } + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java new file mode 100644 index 00000000000..bd054643020 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.AbstractSimpleTransportTestCase; +import org.elasticsearch.transport.BindTransportException; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.TransportSettings; +import org.elasticsearch.transport.nio.channel.NioChannel; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.Collections; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; + +public class SimpleNioTransportTests extends AbstractSimpleTransportTestCase { + + public static MockTransportService nioFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, + ClusterSettings clusterSettings, boolean doHandshake) { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + NetworkService networkService = new NetworkService(settings, Collections.emptyList()); + Transport transport = new NioTransport(settings, threadPool, + networkService, + BigArrays.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) { + + @Override + protected Version executeHandshake(DiscoveryNode node, NioChannel channel, TimeValue timeout) throws IOException, + InterruptedException { + if (doHandshake) { + return super.executeHandshake(node, channel, timeout); + } else { + return version.minimumCompatibilityVersion(); + } + } + + @Override + protected Version getCurrentVersion() { + return version; + } + + @Override + protected SocketEventHandler getSocketEventHandler() { + return new TestingSocketEventHandler(logger, this::exceptionCaught); + } + }; + MockTransportService mockTransportService = + MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings); + mockTransportService.start(); + return mockTransportService; + } + + @Override + protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) { + settings = Settings.builder().put(settings).put(TransportSettings.PORT.getKey(), "0").build(); + MockTransportService transportService = nioFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake); + transportService.start(); + return transportService; + } + + public void testConnectException() throws UnknownHostException { + try { + serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), + emptyMap(), emptySet(),Version.CURRENT)); + fail("Expected 
ConnectTransportException"); + } catch (ConnectTransportException e) { + assertThat(e.getMessage(), containsString("connect_exception")); + assertThat(e.getMessage(), containsString("[127.0.0.1:9876]")); + Throwable cause = e.getCause(); + assertThat(cause, instanceOf(IOException.class)); + } + } + + public void testBindUnavailableAddress() { + // this is on a lower level since it needs access to the TransportService before it's started + int port = serviceA.boundAddress().publishAddress().getPort(); + Settings settings = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "foobar") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") + .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") + .put("transport.tcp.port", port) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { + MockTransportService transportService = nioFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings, true); + try { + transportService.start(); + } finally { + transportService.stop(); + transportService.close(); + } + }); + assertEquals("Failed to bind to ["+ port + "]", bindTransportException.getMessage()); + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/SocketEventHandlerTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/SocketEventHandlerTests.java new file mode 100644 index 00000000000..393b9dc7cc5 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/SocketEventHandlerTests.java @@ -0,0 +1,175 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.nio.channel.CloseFuture; +import org.elasticsearch.transport.nio.channel.DoNotRegisterChannel; +import org.elasticsearch.transport.nio.channel.NioChannel; +import org.elasticsearch.transport.nio.channel.NioSocketChannel; +import org.elasticsearch.transport.nio.channel.ReadContext; +import org.elasticsearch.transport.nio.channel.SelectionKeyUtils; +import org.elasticsearch.transport.nio.channel.TcpWriteContext; +import org.junit.Before; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.CancelledKeyException; +import java.nio.channels.SelectionKey; +import java.nio.channels.SocketChannel; +import java.util.function.BiConsumer; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class SocketEventHandlerTests extends ESTestCase { + + private BiConsumer exceptionHandler; + + private SocketEventHandler handler; + private NioSocketChannel channel; + private ReadContext readContext; + private SocketChannel rawChannel; + + @Before + @SuppressWarnings("unchecked") + public void setUpHandler() throws IOException { + exceptionHandler = mock(BiConsumer.class); + SocketSelector socketSelector = mock(SocketSelector.class); + handler = new SocketEventHandler(logger, exceptionHandler); + rawChannel = mock(SocketChannel.class); + channel = new DoNotRegisterChannel("", rawChannel); + readContext = mock(ReadContext.class); + when(rawChannel.finishConnect()).thenReturn(true); + + channel.setContexts(readContext, new TcpWriteContext(channel)); + channel.register(socketSelector); + channel.finishConnect(); + + when(socketSelector.isOnCurrentThread()).thenReturn(true); + } + + public void testRegisterAddsOP_CONNECTAndOP_READInterest() throws IOException { + handler.handleRegistration(channel); + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT, channel.getSelectionKey().interestOps()); + } + + public void testRegistrationExceptionCallsExceptionHandler() throws IOException { + CancelledKeyException exception = new CancelledKeyException(); + handler.registrationException(channel, exception); + verify(exceptionHandler).accept(channel, exception); + } + + public void testConnectRemovesOP_CONNECTInterest() throws IOException { + SelectionKeyUtils.setConnectAndReadInterested(channel); + handler.handleConnect(channel); + assertEquals(SelectionKey.OP_READ, channel.getSelectionKey().interestOps()); + } + + public void testConnectExceptionCallsExceptionHandler() throws IOException { + IOException exception = new IOException(); + handler.connectException(channel, exception); + verify(exceptionHandler).accept(channel, exception); + } + + public void testHandleReadDelegatesToReadContext() throws IOException { + when(readContext.read()).thenReturn(1); + + handler.handleRead(channel); + + verify(readContext).read(); + } + + public void testHandleReadMarksChannelForCloseIfPeerClosed() throws IOException { + NioSocketChannel nioSocketChannel = mock(NioSocketChannel.class); + CloseFuture closeFuture = mock(CloseFuture.class); + when(nioSocketChannel.getReadContext()).thenReturn(readContext); + when(readContext.read()).thenReturn(-1); + when(nioSocketChannel.getCloseFuture()).thenReturn(closeFuture); + when(closeFuture.isDone()).thenReturn(true); + + 
handler.handleRead(nioSocketChannel); + + verify(nioSocketChannel).closeFromSelector(); + } + + public void testReadExceptionCallsExceptionHandler() throws IOException { + IOException exception = new IOException(); + handler.readException(channel, exception); + verify(exceptionHandler).accept(channel, exception); + } + + @SuppressWarnings("unchecked") + public void testHandleWriteWithCompleteFlushRemovesOP_WRITEInterest() throws IOException { + SelectionKey selectionKey = channel.getSelectionKey(); + setWriteAndRead(channel); + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps()); + + BytesArray bytesArray = new BytesArray(new byte[1]); + NetworkBytesReference networkBuffer = NetworkBytesReference.wrap(bytesArray); + channel.getWriteContext().queueWriteOperations(new WriteOperation(channel, networkBuffer, mock(ActionListener.class))); + + when(rawChannel.write(ByteBuffer.wrap(bytesArray.array()))).thenReturn(1); + handler.handleWrite(channel); + + assertEquals(SelectionKey.OP_READ, selectionKey.interestOps()); + } + + @SuppressWarnings("unchecked") + public void testHandleWriteWithInCompleteFlushLeavesOP_WRITEInterest() throws IOException { + SelectionKey selectionKey = channel.getSelectionKey(); + setWriteAndRead(channel); + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps()); + + BytesArray bytesArray = new BytesArray(new byte[1]); + NetworkBytesReference networkBuffer = NetworkBytesReference.wrap(bytesArray, 1, 0); + channel.getWriteContext().queueWriteOperations(new WriteOperation(channel, networkBuffer, mock(ActionListener.class))); + + when(rawChannel.write(ByteBuffer.wrap(bytesArray.array()))).thenReturn(0); + handler.handleWrite(channel); + + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, selectionKey.interestOps()); + } + + public void testHandleWriteWithNoOpsRemovesOP_WRITEInterest() throws IOException { + SelectionKey selectionKey = channel.getSelectionKey(); + setWriteAndRead(channel); + assertEquals(SelectionKey.OP_READ | SelectionKey.OP_WRITE, channel.getSelectionKey().interestOps()); + + handler.handleWrite(channel); + + assertEquals(SelectionKey.OP_READ, selectionKey.interestOps()); + } + + private void setWriteAndRead(NioChannel channel) { + SelectionKeyUtils.setConnectAndReadInterested(channel); + SelectionKeyUtils.removeConnectInterested(channel); + SelectionKeyUtils.setWriteInterested(channel); + } + + public void testWriteExceptionCallsExceptionHandler() throws IOException { + IOException exception = new IOException(); + handler.writeException(channel, exception); + verify(exceptionHandler).accept(channel, exception); + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/SocketSelectorTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/SocketSelectorTests.java new file mode 100644 index 00000000000..050cf856442 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/SocketSelectorTests.java @@ -0,0 +1,336 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.nio.channel.NioChannel; +import org.elasticsearch.transport.nio.channel.NioSocketChannel; +import org.elasticsearch.transport.nio.channel.WriteContext; +import org.elasticsearch.transport.nio.utils.TestSelectionKey; +import org.junit.Before; + +import java.io.IOException; +import java.nio.channels.CancelledKeyException; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ClosedSelectorException; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.util.HashSet; +import java.util.Set; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyInt; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class SocketSelectorTests extends ESTestCase { + + private SocketSelector socketSelector; + private SocketEventHandler eventHandler; + private NioSocketChannel channel; + private TestSelectionKey selectionKey; + private WriteContext writeContext; + private HashSet keySet = new HashSet<>(); + private ActionListener listener; + private NetworkBytesReference bufferReference = NetworkBytesReference.wrap(new BytesArray(new byte[1])); + + @Before + @SuppressWarnings("unchecked") + public void setUp() throws Exception { + super.setUp(); + eventHandler = mock(SocketEventHandler.class); + channel = mock(NioSocketChannel.class); + writeContext = mock(WriteContext.class); + listener = mock(ActionListener.class); + selectionKey = new TestSelectionKey(0); + selectionKey.attach(channel); + Selector rawSelector = mock(Selector.class); + + this.socketSelector = new SocketSelector(eventHandler, rawSelector); + this.socketSelector.setThread(); + + when(rawSelector.selectedKeys()).thenReturn(keySet); + when(rawSelector.select(0)).thenReturn(1); + when(channel.getSelectionKey()).thenReturn(selectionKey); + when(channel.getWriteContext()).thenReturn(writeContext); + when(channel.isConnectComplete()).thenReturn(true); + } + + public void testRegisterChannel() throws Exception { + socketSelector.registerSocketChannel(channel); + + when(channel.register(socketSelector)).thenReturn(true); + + socketSelector.doSelect(0); + + verify(eventHandler).handleRegistration(channel); + + Set registeredChannels = socketSelector.getRegisteredChannels(); + assertEquals(1, registeredChannels.size()); + assertTrue(registeredChannels.contains(channel)); + } + + public void testRegisterChannelFails() throws Exception { + socketSelector.registerSocketChannel(channel); + + when(channel.register(socketSelector)).thenReturn(false); + + socketSelector.doSelect(0); + + verify(channel, times(0)).finishConnect(); + + Set registeredChannels = socketSelector.getRegisteredChannels(); + assertEquals(0, registeredChannels.size()); + 
assertFalse(registeredChannels.contains(channel)); + } + + public void testRegisterChannelFailsDueToException() throws Exception { + socketSelector.registerSocketChannel(channel); + + ClosedChannelException closedChannelException = new ClosedChannelException(); + when(channel.register(socketSelector)).thenThrow(closedChannelException); + + socketSelector.doSelect(0); + + verify(eventHandler).registrationException(channel, closedChannelException); + verify(channel, times(0)).finishConnect(); + + Set registeredChannels = socketSelector.getRegisteredChannels(); + assertEquals(0, registeredChannels.size()); + assertFalse(registeredChannels.contains(channel)); + } + + public void testSuccessfullyRegisterChannelWillConnect() throws Exception { + socketSelector.registerSocketChannel(channel); + + when(channel.register(socketSelector)).thenReturn(true); + when(channel.finishConnect()).thenReturn(true); + + socketSelector.doSelect(0); + + verify(eventHandler).handleConnect(channel); + } + + public void testConnectIncompleteWillNotNotify() throws Exception { + socketSelector.registerSocketChannel(channel); + + when(channel.register(socketSelector)).thenReturn(true); + when(channel.finishConnect()).thenReturn(false); + + socketSelector.doSelect(0); + + verify(eventHandler, times(0)).handleConnect(channel); + } + + public void testQueueWriteWhenNotRunning() throws Exception { + socketSelector.close(false); + + socketSelector.queueWrite(new WriteOperation(channel, bufferReference, listener)); + + verify(listener).onFailure(any(ClosedSelectorException.class)); + } + + public void testQueueWriteChannelIsNoLongerWritable() throws Exception { + WriteOperation writeOperation = new WriteOperation(channel, bufferReference, listener); + socketSelector.queueWrite(writeOperation); + + when(channel.isWritable()).thenReturn(false); + socketSelector.doSelect(0); + + verify(writeContext, times(0)).queueWriteOperations(writeOperation); + verify(listener).onFailure(any(ClosedChannelException.class)); + } + + public void testQueueWriteSelectionKeyThrowsException() throws Exception { + SelectionKey selectionKey = mock(SelectionKey.class); + + WriteOperation writeOperation = new WriteOperation(channel, bufferReference, listener); + CancelledKeyException cancelledKeyException = new CancelledKeyException(); + socketSelector.queueWrite(writeOperation); + + when(channel.isWritable()).thenReturn(true); + when(channel.getSelectionKey()).thenReturn(selectionKey); + when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); + socketSelector.doSelect(0); + + verify(writeContext, times(0)).queueWriteOperations(writeOperation); + verify(listener).onFailure(cancelledKeyException); + } + + public void testQueueWriteSuccessful() throws Exception { + WriteOperation writeOperation = new WriteOperation(channel, bufferReference, listener); + socketSelector.queueWrite(writeOperation); + + assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); + + when(channel.isWritable()).thenReturn(true); + socketSelector.doSelect(0); + + verify(writeContext).queueWriteOperations(writeOperation); + assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); + } + + public void testQueueDirectlyInChannelBufferSuccessful() throws Exception { + WriteOperation writeOperation = new WriteOperation(channel, bufferReference, listener); + + assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); + + when(channel.isWritable()).thenReturn(true); + socketSelector.queueWriteInChannelBuffer(writeOperation); + + 
verify(writeContext).queueWriteOperations(writeOperation); + assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); + } + + public void testQueueDirectlyInChannelBufferSelectionKeyThrowsException() throws Exception { + SelectionKey selectionKey = mock(SelectionKey.class); + + WriteOperation writeOperation = new WriteOperation(channel, bufferReference, listener); + CancelledKeyException cancelledKeyException = new CancelledKeyException(); + + when(channel.isWritable()).thenReturn(true); + when(channel.getSelectionKey()).thenReturn(selectionKey); + when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); + socketSelector.queueWriteInChannelBuffer(writeOperation); + + verify(writeContext, times(0)).queueWriteOperations(writeOperation); + verify(listener).onFailure(cancelledKeyException); + } + + public void testConnectEvent() throws Exception { + keySet.add(selectionKey); + + selectionKey.setReadyOps(SelectionKey.OP_CONNECT); + + when(channel.finishConnect()).thenReturn(true); + socketSelector.doSelect(0); + + verify(eventHandler).handleConnect(channel); + } + + public void testConnectEventFinishUnsuccessful() throws Exception { + keySet.add(selectionKey); + + selectionKey.setReadyOps(SelectionKey.OP_CONNECT); + + when(channel.finishConnect()).thenReturn(false); + socketSelector.doSelect(0); + + verify(eventHandler, times(0)).handleConnect(channel); + } + + public void testConnectEventFinishThrowException() throws Exception { + keySet.add(selectionKey); + IOException ioException = new IOException(); + + selectionKey.setReadyOps(SelectionKey.OP_CONNECT); + + when(channel.finishConnect()).thenThrow(ioException); + socketSelector.doSelect(0); + + verify(eventHandler, times(0)).handleConnect(channel); + verify(eventHandler).connectException(channel, ioException); + } + + public void testWillNotConsiderWriteOrReadUntilConnectionComplete() throws Exception { + keySet.add(selectionKey); + IOException ioException = new IOException(); + + selectionKey.setReadyOps(SelectionKey.OP_WRITE | SelectionKey.OP_READ); + + doThrow(ioException).when(eventHandler).handleWrite(channel); + + when(channel.isConnectComplete()).thenReturn(false); + socketSelector.doSelect(0); + + verify(eventHandler, times(0)).handleWrite(channel); + verify(eventHandler, times(0)).handleRead(channel); + } + + public void testSuccessfulWriteEvent() throws Exception { + keySet.add(selectionKey); + + selectionKey.setReadyOps(SelectionKey.OP_WRITE); + + socketSelector.doSelect(0); + + verify(eventHandler).handleWrite(channel); + } + + public void testWriteEventWithException() throws Exception { + keySet.add(selectionKey); + IOException ioException = new IOException(); + + selectionKey.setReadyOps(SelectionKey.OP_WRITE); + + doThrow(ioException).when(eventHandler).handleWrite(channel); + + socketSelector.doSelect(0); + + verify(eventHandler).writeException(channel, ioException); + } + + public void testSuccessfulReadEvent() throws Exception { + keySet.add(selectionKey); + + selectionKey.setReadyOps(SelectionKey.OP_READ); + + socketSelector.doSelect(0); + + verify(eventHandler).handleRead(channel); + } + + public void testReadEventWithException() throws Exception { + keySet.add(selectionKey); + IOException ioException = new IOException(); + + selectionKey.setReadyOps(SelectionKey.OP_READ); + + doThrow(ioException).when(eventHandler).handleRead(channel); + + socketSelector.doSelect(0); + + verify(eventHandler).readException(channel, ioException); + } + + public void testCleanup() throws Exception { + 
NioSocketChannel unRegisteredChannel = mock(NioSocketChannel.class); + + when(channel.register(socketSelector)).thenReturn(true); + socketSelector.registerSocketChannel(channel); + + socketSelector.doSelect(0); + + NetworkBytesReference networkBuffer = NetworkBytesReference.wrap(new BytesArray(new byte[1])); + socketSelector.queueWrite(new WriteOperation(mock(NioSocketChannel.class), networkBuffer, listener)); + socketSelector.registerSocketChannel(unRegisteredChannel); + + socketSelector.cleanup(); + + verify(listener).onFailure(any(ClosedSelectorException.class)); + verify(eventHandler).handleClose(channel); + verify(eventHandler).handleClose(unRegisteredChannel); + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java new file mode 100644 index 00000000000..29f595c87a5 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.transport.nio;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.transport.nio.channel.NioSocketChannel;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Set;
+import java.util.WeakHashMap;
+import java.util.function.BiConsumer;
+
+public class TestingSocketEventHandler extends SocketEventHandler {
+
+    private final Logger logger;
+
+    public TestingSocketEventHandler(Logger logger, BiConsumer exceptionHandler) {
+        super(logger, exceptionHandler);
+        this.logger = logger;
+    }
+
+    private Set hasConnectedMap = Collections.newSetFromMap(new WeakHashMap<>());
+
+    public void handleConnect(NioSocketChannel channel) {
+        assert hasConnectedMap.contains(channel) == false : "handleConnect should only be called once per channel";
+        hasConnectedMap.add(channel);
+        super.handleConnect(channel);
+    }
+
+    private Set hasConnectExceptionMap = Collections.newSetFromMap(new WeakHashMap<>());
+
+    public void connectException(NioSocketChannel channel, Exception e) {
+        assert hasConnectExceptionMap.contains(channel) == false : "connectException should only called at maximum once per channel";
+        hasConnectExceptionMap.add(channel);
+        super.connectException(channel, e);
+    }
+
+    public void handleRead(NioSocketChannel channel) throws IOException {
+        super.handleRead(channel);
+    }
+
+    public void readException(NioSocketChannel channel, Exception e) {
+        super.readException(channel, e);
+    }
+
+    public void handleWrite(NioSocketChannel channel) throws IOException {
+        super.handleWrite(channel);
+    }
+
+    public void writeException(NioSocketChannel channel, Exception e) {
+        super.writeException(channel, e);
+    }
+
+}
diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/WriteOperationTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/WriteOperationTests.java
new file mode 100644
index 00000000000..d7284491d64
--- /dev/null
+++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/WriteOperationTests.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.nio;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.nio.channel.NioChannel;
+import org.elasticsearch.transport.nio.channel.NioSocketChannel;
+import org.junit.Before;
+
+import java.io.IOException;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class WriteOperationTests extends ESTestCase {
+
+    private NioSocketChannel channel;
+    private ActionListener listener;
+
+    @Before
+    @SuppressWarnings("unchecked")
+    public void setFields() {
+        channel = mock(NioSocketChannel.class);
+        listener = mock(ActionListener.class);
+
+    }
+
+    public void testFlush() throws IOException {
+        WriteOperation writeOp = new WriteOperation(channel, new BytesArray(new byte[10]), listener);
+
+
+        when(channel.write(any())).thenAnswer(invocationOnMock -> {
+            NetworkBytesReference[] refs = (NetworkBytesReference[]) invocationOnMock.getArguments()[0];
+            refs[0].incrementRead(10);
+            return 10;
+        });
+
+        writeOp.flush();
+
+        assertTrue(writeOp.isFullyFlushed());
+    }
+
+    public void testPartialFlush() throws IOException {
+        WriteOperation writeOp = new WriteOperation(channel, new BytesArray(new byte[10]), listener);
+
+        when(channel.write(any())).thenAnswer(invocationOnMock -> {
+            NetworkBytesReference[] refs = (NetworkBytesReference[]) invocationOnMock.getArguments()[0];
+            refs[0].incrementRead(5);
+            return 5;
+        });
+
+        writeOp.flush();
+
+        assertFalse(writeOp.isFullyFlushed());
+        assertEquals(5, writeOp.getByteReferences()[0].getReadRemaining());
+    }
+}
diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/AbstractNioChannelTestCase.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/AbstractNioChannelTestCase.java
new file mode 100644
index 00000000000..c3909a06440
--- /dev/null
+++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/AbstractNioChannelTestCase.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.common.CheckedRunnable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.mocksocket.MockServerSocket; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.nio.TcpReadHandler; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.net.Socket; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; + +import static org.mockito.Mockito.mock; + +public abstract class AbstractNioChannelTestCase extends ESTestCase { + + ChannelFactory channelFactory = new ChannelFactory(Settings.EMPTY, mock(TcpReadHandler.class)); + MockServerSocket mockServerSocket; + private Thread serverThread; + + @Before + public void serverSocketSetup() throws IOException { + mockServerSocket = new MockServerSocket(0); + serverThread = new Thread(() -> { + while (!mockServerSocket.isClosed()) { + try { + Socket socket = mockServerSocket.accept(); + InputStream inputStream = socket.getInputStream(); + socket.close(); + } catch (IOException e) { + } + } + }); + serverThread.start(); + } + + @After + public void serverSocketTearDown() throws IOException { + serverThread.interrupt(); + mockServerSocket.close(); + } + + public abstract NioChannel channelToClose() throws IOException; + + public void testClose() throws IOException, TimeoutException, InterruptedException { + AtomicReference ref = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + + NioChannel socketChannel = channelToClose(); + CloseFuture closeFuture = socketChannel.getCloseFuture(); + closeFuture.setListener((c) -> {ref.set(c); latch.countDown();}); + + assertFalse(closeFuture.isClosed()); + assertTrue(socketChannel.getRawChannel().isOpen()); + + socketChannel.closeAsync(); + + closeFuture.awaitClose(100, TimeUnit.SECONDS); + + assertFalse(socketChannel.getRawChannel().isOpen()); + assertTrue(closeFuture.isClosed()); + latch.await(); + assertSame(socketChannel, ref.get()); + } + + protected Runnable wrappedRunnable(CheckedRunnable runnable) { + return () -> { + try { + runnable.run(); + } catch (Exception e) { + } + }; + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/DoNotRegisterChannel.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/DoNotRegisterChannel.java new file mode 100644 index 00000000000..38f381bfcc5 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/DoNotRegisterChannel.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.transport.nio.ESSelector; +import org.elasticsearch.transport.nio.utils.TestSelectionKey; + +import java.io.IOException; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.SocketChannel; + +public class DoNotRegisterChannel extends NioSocketChannel { + + public DoNotRegisterChannel(String profile, SocketChannel socketChannel) throws IOException { + super(profile, socketChannel); + } + + @Override + public boolean register(ESSelector selector) throws ClosedChannelException { + if (markRegistered(selector)) { + setSelectionKey(new TestSelectionKey(0)); + return true; + } else { + return false; + } + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/DoNotRegisterServerChannel.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/DoNotRegisterServerChannel.java new file mode 100644 index 00000000000..e9e1fc207a0 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/DoNotRegisterServerChannel.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.transport.nio.ESSelector; +import org.elasticsearch.transport.nio.utils.TestSelectionKey; + +import java.io.IOException; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ServerSocketChannel; + +public class DoNotRegisterServerChannel extends NioServerSocketChannel { + + public DoNotRegisterServerChannel(String profile, ServerSocketChannel channel, ChannelFactory channelFactory) throws IOException { + super(profile, channel, channelFactory); + } + + @Override + public boolean register(ESSelector selector) throws ClosedChannelException { + if (markRegistered(selector)) { + setSelectionKey(new TestSelectionKey(0)); + return true; + } else { + return false; + } + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioServerSocketChannelTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioServerSocketChannelTests.java new file mode 100644 index 00000000000..c991263562c --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioServerSocketChannelTests.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.channel; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; + +public class NioServerSocketChannelTests extends AbstractNioChannelTestCase { + + @Override + public NioChannel channelToClose() throws IOException { + return channelFactory.openNioServerSocketChannel("nio", new InetSocketAddress(InetAddress.getLoopbackAddress(),0)); + } + +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioSocketChannelTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioSocketChannelTests.java new file mode 100644 index 00000000000..d195e835699 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/NioSocketChannelTests.java @@ -0,0 +1,85 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio.channel; + +import java.io.IOException; +import java.net.ConnectException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.LockSupport; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; + +public class NioSocketChannelTests extends AbstractNioChannelTestCase { + + private InetAddress loopbackAddress = InetAddress.getLoopbackAddress(); + + @Override + public NioChannel channelToClose() throws IOException { + return channelFactory.openNioChannel(new InetSocketAddress(loopbackAddress, mockServerSocket.getLocalPort())); + } + + public void testConnectSucceeds() throws IOException, InterruptedException { + InetSocketAddress remoteAddress = new InetSocketAddress(loopbackAddress, mockServerSocket.getLocalPort()); + NioSocketChannel socketChannel = channelFactory.openNioChannel(remoteAddress); + Thread thread = new Thread(wrappedRunnable(() -> ensureConnect(socketChannel))); + thread.start(); + ConnectFuture connectFuture = socketChannel.getConnectFuture(); + connectFuture.awaitConnectionComplete(100, TimeUnit.SECONDS); + + assertTrue(socketChannel.isConnectComplete()); + assertTrue(socketChannel.isOpen()); + assertFalse(connectFuture.connectFailed()); + assertNull(connectFuture.getException()); + + thread.join(); + } + + public void testConnectFails() throws IOException, InterruptedException { + mockServerSocket.close(); + InetSocketAddress remoteAddress = new InetSocketAddress(loopbackAddress, mockServerSocket.getLocalPort()); + NioSocketChannel socketChannel = channelFactory.openNioChannel(remoteAddress); + Thread thread = new Thread(wrappedRunnable(() -> ensureConnect(socketChannel))); + thread.start(); + ConnectFuture connectFuture = socketChannel.getConnectFuture(); + connectFuture.awaitConnectionComplete(100, TimeUnit.SECONDS); + + assertFalse(socketChannel.isConnectComplete()); + // Even if connection fails the channel is 'open' until close() is called + assertTrue(socketChannel.isOpen()); + assertTrue(connectFuture.connectFailed()); + assertThat(connectFuture.getException(), instanceOf(ConnectException.class)); + assertThat(connectFuture.getException().getMessage(), containsString("Connection refused")); + + thread.join(); + } + + private void ensureConnect(NioSocketChannel nioSocketChannel) throws IOException { + for (;;) { + boolean isConnected = nioSocketChannel.finishConnect(); + if (isConnected) { + return; + } + LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(1)); + } + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/TcpFrameDecoderTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/TcpFrameDecoderTests.java new file mode 100644 index 00000000000..519828592be --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/TcpFrameDecoderTests.java @@ -0,0 +1,169 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TcpTransport; + +import java.io.IOException; +import java.io.StreamCorruptedException; + +import static org.hamcrest.Matchers.instanceOf; + +public class TcpFrameDecoderTests extends ESTestCase { + + private TcpFrameDecoder frameDecoder = new TcpFrameDecoder(); + + public void testDefaultExceptedMessageLengthIsNegative1() { + assertEquals(-1, frameDecoder.expectedMessageLength()); + } + + public void testDecodeWithIncompleteHeader() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('S'); + streamOutput.write(1); + streamOutput.write(1); + streamOutput.write(0); + streamOutput.write(0); + + assertNull(frameDecoder.decode(streamOutput.bytes(), 4)); + assertEquals(-1, frameDecoder.expectedMessageLength()); + } + + public void testDecodePing() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('S'); + streamOutput.writeInt(-1); + + BytesReference message = frameDecoder.decode(streamOutput.bytes(), 6); + + assertEquals(-1, frameDecoder.expectedMessageLength()); + assertEquals(streamOutput.bytes(), message); + } + + public void testDecodePingWithStartOfSecondMessage() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('S'); + streamOutput.writeInt(-1); + streamOutput.write('E'); + streamOutput.write('S'); + + BytesReference message = frameDecoder.decode(streamOutput.bytes(), 8); + + assertEquals(6, message.length()); + assertEquals(streamOutput.bytes().slice(0, 6), message); + } + + public void testDecodeMessage() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('S'); + streamOutput.writeInt(2); + streamOutput.write('M'); + streamOutput.write('A'); + + BytesReference message = frameDecoder.decode(streamOutput.bytes(), 8); + + assertEquals(-1, frameDecoder.expectedMessageLength()); + assertEquals(streamOutput.bytes(), message); + } + + public void testDecodeIncompleteMessage() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('S'); + streamOutput.writeInt(3); + streamOutput.write('M'); + streamOutput.write('A'); + + BytesReference message = frameDecoder.decode(streamOutput.bytes(), 8); + + assertEquals(9, frameDecoder.expectedMessageLength()); + assertNull(message); + } + + public void testInvalidLength() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('S'); + streamOutput.writeInt(-2); + streamOutput.write('M'); + streamOutput.write('A'); + + try { + frameDecoder.decode(streamOutput.bytes(), 8); + fail("Expected 
exception"); + } catch (Exception ex) { + assertThat(ex, instanceOf(StreamCorruptedException.class)); + assertEquals("invalid data length: -2", ex.getMessage()); + } + } + + public void testInvalidHeader() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('C'); + byte byte1 = randomByte(); + byte byte2 = randomByte(); + streamOutput.write(byte1); + streamOutput.write(byte2); + streamOutput.write(randomByte()); + streamOutput.write(randomByte()); + streamOutput.write(randomByte()); + + try { + frameDecoder.decode(streamOutput.bytes(), 7); + fail("Expected exception"); + } catch (Exception ex) { + assertThat(ex, instanceOf(StreamCorruptedException.class)); + String expected = "invalid internal transport message format, got (45,43," + + Integer.toHexString(byte1 & 0xFF) + "," + + Integer.toHexString(byte2 & 0xFF) + ")"; + assertEquals(expected, ex.getMessage()); + } + } + + public void testHTTPHeader() throws IOException { + String[] httpHeaders = {"GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE"}; + + for (String httpHeader : httpHeaders) { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + + for (char c : httpHeader.toCharArray()) { + streamOutput.write((byte) c); + } + streamOutput.write(new byte[6]); + + try { + BytesReference bytes = streamOutput.bytes(); + frameDecoder.decode(bytes, bytes.length()); + fail("Expected exception"); + } catch (Exception ex) { + assertThat(ex, instanceOf(TcpTransport.HttpOnTransportException.class)); + assertEquals("This is not a HTTP port", ex.getMessage()); + } + } + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/TcpReadContextTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/TcpReadContextTests.java new file mode 100644 index 00000000000..fc8d7e48ab0 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/TcpReadContextTests.java @@ -0,0 +1,150 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.CompositeBytesReference; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.nio.NetworkBytesReference; +import org.elasticsearch.transport.nio.TcpReadHandler; +import org.junit.Before; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class TcpReadContextTests extends ESTestCase { + + private static String PROFILE = "profile"; + private TcpReadHandler handler; + private int messageLength; + private NioSocketChannel channel; + private TcpReadContext readContext; + + @Before + public void init() throws IOException { + handler = mock(TcpReadHandler.class); + + messageLength = randomInt(96) + 4; + channel = mock(NioSocketChannel.class); + readContext = new TcpReadContext(channel, handler); + + when(channel.getProfile()).thenReturn(PROFILE); + } + + public void testSuccessfulRead() throws IOException { + byte[] bytes = createMessage(messageLength); + byte[] fullMessage = combineMessageAndHeader(bytes); + + final AtomicInteger bufferCapacity = new AtomicInteger(); + when(channel.read(any(NetworkBytesReference.class))).thenAnswer(invocationOnMock -> { + NetworkBytesReference reference = (NetworkBytesReference) invocationOnMock.getArguments()[0]; + ByteBuffer buffer = reference.getWriteByteBuffer(); + bufferCapacity.set(reference.getWriteRemaining()); + buffer.put(fullMessage); + reference.incrementWrite(fullMessage.length); + return fullMessage.length; + }); + + readContext.read(); + + verify(handler).handleMessage(new BytesArray(bytes), channel, PROFILE, messageLength); + assertEquals(1024 * 16, bufferCapacity.get()); + + BytesArray bytesArray = new BytesArray(new byte[10]); + bytesArray.slice(5, 5); + bytesArray.slice(5, 0); + } + + public void testPartialRead() throws IOException { + byte[] part1 = createMessage(messageLength); + byte[] fullPart1 = combineMessageAndHeader(part1, messageLength + messageLength); + byte[] part2 = createMessage(messageLength); + + final AtomicInteger bufferCapacity = new AtomicInteger(); + final AtomicReference bytes = new AtomicReference<>(); + + when(channel.read(any(NetworkBytesReference.class))).thenAnswer(invocationOnMock -> { + NetworkBytesReference reference = (NetworkBytesReference) invocationOnMock.getArguments()[0]; + ByteBuffer buffer = reference.getWriteByteBuffer(); + bufferCapacity.set(reference.getWriteRemaining()); + buffer.put(bytes.get()); + reference.incrementWrite(bytes.get().length); + return bytes.get().length; + }); + + + bytes.set(fullPart1); + readContext.read(); + + assertEquals(1024 * 16, bufferCapacity.get()); + verifyZeroInteractions(handler); + + bytes.set(part2); + readContext.read(); + + assertEquals(1024 * 16 - fullPart1.length, bufferCapacity.get()); + + CompositeBytesReference reference = new CompositeBytesReference(new BytesArray(part1), new BytesArray(part2)); + verify(handler).handleMessage(reference, channel, PROFILE, messageLength + messageLength); + } + + public void testReadThrowsIOException() throws IOException { + IOException ioException = new IOException(); + 
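+        // Stub the raw channel read to throw; the read context is expected to propagate the same IOException to its caller.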
when(channel.read(any())).thenThrow(ioException); + + try { + readContext.read(); + fail("Expected exception"); + } catch (Exception ex) { + assertSame(ioException, ex); + } + } + + private static byte[] combineMessageAndHeader(byte[] bytes) { + return combineMessageAndHeader(bytes, bytes.length); + } + + private static byte[] combineMessageAndHeader(byte[] bytes, int messageLength) { + byte[] fullMessage = new byte[bytes.length + 6]; + ByteBuffer wrapped = ByteBuffer.wrap(fullMessage); + wrapped.put((byte) 'E'); + wrapped.put((byte) 'S'); + wrapped.putInt(messageLength); + wrapped.put(bytes); + return fullMessage; + } + + private static byte[] createMessage(int length) { + byte[] bytes = new byte[length]; + for (int i = 0; i < length; ++i) { + bytes[i] = randomByte(); + } + return bytes; + } + +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/TcpWriteContextTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/TcpWriteContextTests.java new file mode 100644 index 00000000000..d2a2f446e73 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/channel/TcpWriteContextTests.java @@ -0,0 +1,296 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio.channel; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.nio.SocketSelector; +import org.elasticsearch.transport.nio.WriteOperation; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.SocketChannel; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class TcpWriteContextTests extends ESTestCase { + + private SocketSelector selector; + private ActionListener listener; + private TcpWriteContext writeContext; + private NioSocketChannel channel; + + @Before + @SuppressWarnings("unchecked") + public void setUp() throws Exception { + super.setUp(); + selector = mock(SocketSelector.class); + listener = mock(ActionListener.class); + channel = mock(NioSocketChannel.class); + writeContext = new TcpWriteContext(channel); + + when(channel.getSelector()).thenReturn(selector); + when(selector.isOnCurrentThread()).thenReturn(true); + } + + public void testWriteFailsIfChannelNotWritable() throws Exception { + when(channel.isWritable()).thenReturn(false); + + writeContext.sendMessage(new BytesArray(generateBytes(10)), listener); + + verify(listener).onFailure(any(ClosedChannelException.class)); + } + + public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { + byte[] bytes = generateBytes(10); + BytesArray bytesArray = new BytesArray(bytes); + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); + + when(selector.isOnCurrentThread()).thenReturn(false); + when(channel.isWritable()).thenReturn(true); + + writeContext.sendMessage(bytesArray, listener); + + verify(selector).queueWrite(writeOpCaptor.capture()); + WriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(listener, writeOp.getListener()); + assertSame(channel, writeOp.getChannel()); + assertEquals(ByteBuffer.wrap(bytes), writeOp.getByteReferences()[0].getReadByteBuffer()); + } + + public void testSendMessageFromSameThreadIsQueuedInChannel() throws Exception { + byte[] bytes = generateBytes(10); + BytesArray bytesArray = new BytesArray(bytes); + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); + + when(channel.isWritable()).thenReturn(true); + + writeContext.sendMessage(bytesArray, listener); + + verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); + WriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(listener, writeOp.getListener()); + assertSame(channel, writeOp.getChannel()); + assertEquals(ByteBuffer.wrap(bytes), writeOp.getByteReferences()[0].getReadByteBuffer()); + } + + public void testWriteIsQueuedInChannel() throws Exception { + assertFalse(writeContext.hasQueuedWriteOps()); + + writeContext.queueWriteOperations(new WriteOperation(channel, new BytesArray(generateBytes(10)), listener)); + + assertTrue(writeContext.hasQueuedWriteOps()); + } + + public void testWriteOpsCanBeCleared() throws Exception { + assertFalse(writeContext.hasQueuedWriteOps()); + + writeContext.queueWriteOperations(new WriteOperation(channel, new BytesArray(generateBytes(10)), listener)); + + assertTrue(writeContext.hasQueuedWriteOps()); + + 
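+        // Clearing the queued ops must fail each pending listener with the supplied exception and leave the queue empty.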
ClosedChannelException e = new ClosedChannelException(); + writeContext.clearQueuedWriteOps(e); + + verify(listener).onFailure(e); + + assertFalse(writeContext.hasQueuedWriteOps()); + } + + public void testQueuedWriteIsFlushedInFlushCall() throws Exception { + assertFalse(writeContext.hasQueuedWriteOps()); + + WriteOperation writeOperation = mock(WriteOperation.class); + writeContext.queueWriteOperations(writeOperation); + + assertTrue(writeContext.hasQueuedWriteOps()); + + when(writeOperation.isFullyFlushed()).thenReturn(true); + when(writeOperation.getListener()).thenReturn(listener); + writeContext.flushChannel(); + + verify(writeOperation).flush(); + verify(listener).onResponse(channel); + assertFalse(writeContext.hasQueuedWriteOps()); + } + + public void testPartialFlush() throws IOException { + assertFalse(writeContext.hasQueuedWriteOps()); + + WriteOperation writeOperation = mock(WriteOperation.class); + writeContext.queueWriteOperations(writeOperation); + + assertTrue(writeContext.hasQueuedWriteOps()); + + when(writeOperation.isFullyFlushed()).thenReturn(false); + writeContext.flushChannel(); + + verify(listener, times(0)).onResponse(channel); + assertTrue(writeContext.hasQueuedWriteOps()); + } + + @SuppressWarnings("unchecked") + public void testMultipleWritesPartialFlushes() throws IOException { + assertFalse(writeContext.hasQueuedWriteOps()); + + ActionListener listener2 = mock(ActionListener.class); + WriteOperation writeOperation1 = mock(WriteOperation.class); + WriteOperation writeOperation2 = mock(WriteOperation.class); + when(writeOperation1.getListener()).thenReturn(listener); + when(writeOperation2.getListener()).thenReturn(listener2); + writeContext.queueWriteOperations(writeOperation1); + writeContext.queueWriteOperations(writeOperation2); + + assertTrue(writeContext.hasQueuedWriteOps()); + + when(writeOperation1.isFullyFlushed()).thenReturn(true); + when(writeOperation2.isFullyFlushed()).thenReturn(false); + writeContext.flushChannel(); + + verify(listener).onResponse(channel); + verify(listener2, times(0)).onResponse(channel); + assertTrue(writeContext.hasQueuedWriteOps()); + + when(writeOperation2.isFullyFlushed()).thenReturn(true); + + writeContext.flushChannel(); + + verify(listener2).onResponse(channel); + assertFalse(writeContext.hasQueuedWriteOps()); + } + + private class ConsumeAllChannel extends NioSocketChannel { + + private byte[] bytes; + private byte[] bytes2; + + ConsumeAllChannel() throws IOException { + super("", mock(SocketChannel.class)); + } + + public int write(ByteBuffer buffer) throws IOException { + bytes = new byte[buffer.remaining()]; + buffer.get(bytes); + return bytes.length; + } + + public long vectorizedWrite(ByteBuffer[] buffer) throws IOException { + if (buffer.length != 2) { + throw new IOException("Only allows 2 buffers"); + } + bytes = new byte[buffer[0].remaining()]; + buffer[0].get(bytes); + + bytes2 = new byte[buffer[1].remaining()]; + buffer[1].get(bytes2); + return bytes.length + bytes2.length; + } + } + + private class HalfConsumeChannel extends NioSocketChannel { + + private byte[] bytes; + private byte[] bytes2; + + HalfConsumeChannel() throws IOException { + super("", mock(SocketChannel.class)); + } + + public int write(ByteBuffer buffer) throws IOException { + bytes = new byte[buffer.limit() / 2]; + buffer.get(bytes); + return bytes.length; + } + + public long vectorizedWrite(ByteBuffer[] buffers) throws IOException { + if (buffers.length != 2) { + throw new IOException("Only allows 2 buffers"); + } + if (bytes == null) { + 
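+                // First call: size the capture arrays from the untouched buffers; later calls drain whichever buffer still has bytes remaining.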
bytes = new byte[buffers[0].remaining()]; + bytes2 = new byte[buffers[1].remaining()]; + } + + if (buffers[0].remaining() != 0) { + buffers[0].get(bytes); + return bytes.length; + } else { + buffers[1].get(bytes2); + return bytes2.length; + } + } + } + + private class MultiWriteChannel extends NioSocketChannel { + + private byte[] write1Bytes; + private byte[] write1Bytes2; + private byte[] write2Bytes1; + private byte[] write2Bytes2; + + MultiWriteChannel() throws IOException { + super("", mock(SocketChannel.class)); + } + + public long vectorizedWrite(ByteBuffer[] buffers) throws IOException { + if (buffers.length != 4 && write1Bytes == null) { + throw new IOException("Only allows 4 buffers"); + } else if (buffers.length != 2 && write1Bytes != null) { + throw new IOException("Only allows 2 buffers on second write"); + } + if (write1Bytes == null) { + write1Bytes = new byte[buffers[0].remaining()]; + write1Bytes2 = new byte[buffers[1].remaining()]; + write2Bytes1 = new byte[buffers[2].remaining()]; + write2Bytes2 = new byte[buffers[3].remaining()]; + } + + if (buffers[0].remaining() != 0) { + buffers[0].get(write1Bytes); + buffers[1].get(write1Bytes2); + buffers[2].get(write2Bytes1); + return write1Bytes.length + write1Bytes2.length + write2Bytes1.length; + } else { + buffers[1].get(write2Bytes2); + return write2Bytes2.length; + } + } + } + + private byte[] generateBytes(int n) { + n += 10; + byte[] bytes = new byte[n]; + for (int i = 0; i < n; ++i) { + bytes[i] = randomByte(); + } + return bytes; + } + +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/utils/TestSelectionKey.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/utils/TestSelectionKey.java new file mode 100644 index 00000000000..0f0011f1553 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/utils/TestSelectionKey.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio.utils; + +import java.nio.channels.SelectableChannel; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.spi.AbstractSelectionKey; + +public class TestSelectionKey extends AbstractSelectionKey { + + private int ops = 0; + private int readyOps; + + public TestSelectionKey(int ops) { + this.ops = ops; + } + + @Override + public SelectableChannel channel() { + return null; + } + + @Override + public Selector selector() { + return null; + } + + @Override + public int interestOps() { + return ops; + } + + @Override + public SelectionKey interestOps(int ops) { + this.ops = ops; + return this; + } + + @Override + public int readyOps() { + return readyOps; + } + + public void setReadyOps(int readyOps) { + this.readyOps = readyOps; + } +}
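For reference, the framing exercised by TcpFrameDecoderTests above is a 6-byte header: the bytes 'E' and 'S' followed by a big-endian int payload length, where -1 marks a ping and the length excludes the header itself. The following is a minimal, dependency-free sketch of that framing; it is illustrative only, and the class and method names are made up rather than taken from the patch.

import java.io.StreamCorruptedException;
import java.nio.ByteBuffer;

// Illustrative sketch (hypothetical names) of the header handling that TcpFrameDecoderTests drives.
public class FrameHeaderSketch {

    // Prepend the 6-byte header: 'E', 'S', then the payload length as a big-endian int.
    static byte[] frame(byte[] payload) {
        ByteBuffer buffer = ByteBuffer.allocate(6 + payload.length);
        buffer.put((byte) 'E').put((byte) 'S').putInt(payload.length).put(payload);
        return buffer.array();
    }

    // Total frame size once the header is readable, or -1 if fewer than 6 bytes are available yet.
    static int expectedFrameLength(ByteBuffer buffer) throws StreamCorruptedException {
        if (buffer.remaining() < 6) {
            return -1;
        }
        if (buffer.get(0) != 'E' || buffer.get(1) != 'S') {
            throw new StreamCorruptedException("invalid internal transport message format");
        }
        int length = buffer.getInt(2);
        if (length == -1) {
            return 6; // a ping is just the header with length -1 and no payload
        }
        if (length < 0) {
            throw new StreamCorruptedException("invalid data length: " + length);
        }
        return 6 + length;
    }

    public static void main(String[] args) throws Exception {
        System.out.println(expectedFrameLength(ByteBuffer.wrap(frame(new byte[]{'M', 'A'}))));  // 8
        System.out.println(expectedFrameLength(ByteBuffer.wrap(frame(new byte[0]))));           // 6
        byte[] ping = new byte[]{'E', 'S', -1, -1, -1, -1};
        System.out.println(expectedFrameLength(ByteBuffer.wrap(ping)));                         // 6
    }
}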