diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 36f3b36e7cd..65402290e01 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -28,6 +28,7 @@ import org.gradle.api.Task import org.gradle.api.XmlProvider import org.gradle.api.artifacts.Configuration import org.gradle.api.artifacts.ModuleDependency +import org.gradle.api.artifacts.ModuleVersionIdentifier import org.gradle.api.artifacts.ProjectDependency import org.gradle.api.artifacts.ResolvedArtifact import org.gradle.api.artifacts.dsl.RepositoryHandler @@ -294,12 +295,15 @@ class BuildPlugin implements Plugin { * Returns a closure which can be used with a MavenPom for fixing problems with gradle generated poms. * * */ private static Closure fixupDependencies(Project project) { - // TODO: remove this when enforcing gradle 2.14+, it now properly handles exclusions + // TODO: revisit this when upgrading to Gradle 2.14+, see Javadoc comment above return { XmlProvider xml -> // first find if we have dependencies at all, and grab the node NodeList depsNodes = xml.asNode().get('dependencies') @@ -334,10 +338,19 @@ class BuildPlugin implements Plugin { continue } - // we now know we have something to exclude, so add a wildcard exclusion element - Node exclusion = depNode.appendNode('exclusions').appendNode('exclusion') - exclusion.appendNode('groupId', '*') - exclusion.appendNode('artifactId', '*') + // we now know we have something to exclude, so add exclusions for all artifacts except the main one + Node exclusions = depNode.appendNode('exclusions') + for (ResolvedArtifact artifact : artifacts) { + ModuleVersionIdentifier moduleVersionIdentifier = artifact.moduleVersion.id + String depGroupId = moduleVersionIdentifier.group + String depArtifactId = moduleVersionIdentifier.name + // skip the main artifact itself, exclude everything else + if (depGroupId != groupId || depArtifactId != artifactId) { + Node exclusion = exclusions.appendNode('exclusion') + exclusion.appendNode('groupId', depGroupId) + exclusion.appendNode('artifactId', depArtifactId) + } + } } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 11bdbd19525..a46a7bda374 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -38,7 +38,7 @@ public class DocsTestPlugin extends RestTestPlugin { * the last released version for docs.
*/ '\\{version\\}': VersionProperties.elasticsearch.replace('-SNAPSHOT', ''), - '\\{lucene_version\\}' : VersionProperties.lucene, + '\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''), ] Task listSnippets = project.tasks.create('listSnippets', SnippetsTask) listSnippets.group 'Docs' diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index 48183a07721..36828f1cb86 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -62,6 +62,15 @@ class ClusterConfiguration { @Input boolean debug = false + /** + * If true, each node will be configured with discovery.zen.minimum_master_nodes set + * to the total number of nodes in the cluster. This will also cause each node to use a `0s` state recovery + * timeout, which can lead to issues if, for instance, an existing cluster state is expected to be recovered + * before any tests start + */ + @Input + boolean useMinimumMasterNodes = true + @Input String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') + " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') + @@ -95,11 +104,11 @@ class ClusterConfiguration { @Input Closure waitCondition = { NodeInfo node, AntBuilder ant -> File tmpFile = new File(node.cwd, 'wait.success') - ant.echo("==> [${new Date()}] checking health: http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}") + ant.echo("==> [${new Date()}] checking health: http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow") // checking here for wait_for_nodes to be >= the number of nodes because it's possible // this cluster is attempting to connect to nodes created by another task (same cluster name), // so there will be more nodes in that case in the cluster state - ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}", + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", dest: tmpFile.toString(), ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task retries: 10) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 957e845aa57..2095c892f50 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -73,8 +73,8 @@ class ClusterFormationTasks { } // this is our current version distribution configuration we use for all kinds of REST tests etc. String distroConfigName = "${task.name}_elasticsearchDistro" - Configuration distro = project.configurations.create(distroConfigName) - configureDistributionDependency(project, config.distribution, distro, VersionProperties.elasticsearch) + Configuration currentDistro = project.configurations.create(distroConfigName) + configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch) if (config.bwcVersion != null && config.numBwcNodes > 0) { // if we have a cluster that has a BWC cluster we also need to configure a dependency on the BWC version // this version uses the same distribution etc.
and only differs in the version we depend on. @@ -85,11 +85,11 @@ class ClusterFormationTasks { } configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion) } - - for (int i = 0; i < config.numNodes; ++i) { + for (int i = 0; i < config.numNodes; i++) { // we start N nodes and out of these N nodes there might be M bwc nodes. // for each of those nodes we might have a different configuration String elasticsearchVersion = VersionProperties.elasticsearch + Configuration distro = currentDistro if (i < config.numBwcNodes) { elasticsearchVersion = config.bwcVersion distro = project.configurations.elasticsearchBwcDistro @@ -252,9 +252,17 @@ class ClusterFormationTasks { 'path.repo' : "${node.sharedDir}/repo", 'path.shared_data' : "${node.sharedDir}/", // Define a node attribute so we can test that it exists - 'node.attr.testattr' : 'test', + 'node.attr.testattr' : 'test', 'repositories.url.allowed_urls': 'http://snapshot.test*' ] + // we set min master nodes to the total number of nodes in the cluster and + // basically skip initial state recovery to allow the cluster to form using a realistic master election. + // This means all nodes must be up, join the seed node and do a master election. This will also allow new and + // old nodes in the BWC case to become the master + if (node.config.useMinimumMasterNodes && node.config.numNodes > 1) { + esConfig['discovery.zen.minimum_master_nodes'] = node.config.numNodes + esConfig['discovery.initial_state_timeout'] = '0s' // don't wait for state... just start up quickly + } esConfig['node.max_local_storage_nodes'] = node.config.numNodes esConfig['http.port'] = node.config.httpPort esConfig['transport.tcp.port'] = node.config.transportPort diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index d50937408e7..51bccb4fe75 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -55,7 +55,9 @@ public class RestIntegTestTask extends RandomizedTestingTask { parallelism = '1' include('**/*IT.class') systemProperty('tests.rest.load_packaged', 'false') - systemProperty('tests.rest.cluster', "${-> nodes[0].httpUri()}") + // we pass all nodes to the rest cluster to allow the clients to round-robin between them + // this is more realistic than just talking to a single node + systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}") systemProperty('tests.config.dir', "${-> nodes[0].confDir}") // TODO: our "client" qa tests currently use the rest-test plugin. Instead they should have their own plugin // that sets up the test cluster and passes this transport uri instead of http uri.
Until then, we pass diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index c461a53fd88..d37e7132d68 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -459,7 +459,6 @@ - diff --git a/buildSrc/version.properties b/buildSrc/version.properties index c630ca3ebb8..bbf4170591d 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 6.0.0-alpha1 -lucene = 6.2.0 +lucene = 6.3.0-snapshot-a66a445 # optional dependencies spatial4j = 0.6 @@ -11,7 +11,7 @@ slf4j = 1.6.2 jna = 4.2.2 # test dependencies -randomizedrunner = 2.3.2 +randomizedrunner = 2.4.0 junit = 4.11 httpclient = 4.5.2 httpcore = 4.4.5 diff --git a/client/rest/src/main/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumer.java b/client/rest/src/main/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumer.java index da7f5c79721..56b89db1694 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumer.java +++ b/client/rest/src/main/java/org/elasticsearch/client/HeapBufferedAsyncResponseConsumer.java @@ -46,7 +46,7 @@ public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseCons //default buffer limit is 10MB public static final int DEFAULT_BUFFER_LIMIT = 10 * 1024 * 1024; - private final int bufferLimit; + private final int bufferLimitBytes; private volatile HttpResponse response; private volatile SimpleInputBuffer buf; @@ -54,7 +54,7 @@ public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseCons * Creates a new instance of this consumer with a buffer limit of {@link #DEFAULT_BUFFER_LIMIT} */ public HeapBufferedAsyncResponseConsumer() { - this.bufferLimit = DEFAULT_BUFFER_LIMIT; + this.bufferLimitBytes = DEFAULT_BUFFER_LIMIT; } /** @@ -64,7 +64,14 @@ public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseCons if (bufferLimit <= 0) { throw new IllegalArgumentException("bufferLimit must be greater than 0"); } - this.bufferLimit = bufferLimit; + this.bufferLimitBytes = bufferLimit; + } + + /** + * Get the limit of the buffer. 
+ */ + public int getBufferLimit() { + return bufferLimitBytes; } @Override @@ -75,9 +82,9 @@ public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseCons @Override protected void onEntityEnclosed(HttpEntity entity, ContentType contentType) throws IOException { long len = entity.getContentLength(); - if (len > bufferLimit) { + if (len > bufferLimitBytes) { throw new ContentTooLongException("entity content is too long [" + len + - "] for the configured buffer limit [" + bufferLimit + "]"); + "] for the configured buffer limit [" + bufferLimitBytes + "]"); } if (len < 0) { len = 4096; diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index d2301e1e8e7..b8eb98b4aee 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -510,6 +510,7 @@ public class RestClient implements Closeable { private static URI buildUri(String pathPrefix, String path, Map params) { Objects.requireNonNull(params, "params must not be null"); + Objects.requireNonNull(path, "path must not be null"); try { String fullPath; if (pathPrefix != null) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java index 789f2bf6f6d..17c2a158ea8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.client; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.apache.http.HttpEntity; import org.apache.http.HttpEntityEnclosingRequest; import org.apache.http.HttpHost; @@ -62,7 +62,7 @@ public class RequestLoggerTests extends RestClientTestCase { } HttpRequestBase request; - int requestType = RandomInts.randomIntBetween(getRandom(), 0, 7); + int requestType = RandomNumbers.randomIntBetween(getRandom(), 0, 7); switch(requestType) { case 0: request = new HttpGetWithEntity(uri); @@ -99,7 +99,7 @@ public class RequestLoggerTests extends RestClientTestCase { expected += " -d '" + requestBody + "'"; HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; HttpEntity entity; - switch(RandomInts.randomIntBetween(getRandom(), 0, 3)) { + switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) { case 0: entity = new StringEntity(requestBody, StandardCharsets.UTF_8); break; @@ -128,12 +128,12 @@ public class RequestLoggerTests extends RestClientTestCase { public void testTraceResponse() throws IOException { ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - int statusCode = RandomInts.randomIntBetween(getRandom(), 200, 599); + int statusCode = RandomNumbers.randomIntBetween(getRandom(), 200, 599); String reasonPhrase = "REASON"; BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase); String expected = "# " + statusLine.toString(); BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine); - int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3); + int numHeaders = RandomNumbers.randomIntBetween(getRandom(), 0, 3); for (int i = 0; i < numHeaders; i++) { httpResponse.setHeader("header" + i, "value"); expected += "\n# header" + i + ": value"; diff --git 
a/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java index 9c5c50946d8..941af2246f8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java @@ -229,6 +229,17 @@ public class RestClientIntegTests extends RestClientTestCase { } } + public void testPath() throws IOException { + for (String method : getHttpMethods()) { + try { + restClient.performRequest(method, null); + fail("path set to null should fail!"); + } catch (NullPointerException e) { + assertEquals("path must not be null", e.getMessage()); + } + } + } + private void bodyTest(String method) throws IOException { String requestBody = "{ \"field\": \"value\" }"; StringEntity entity = new StringEntity(requestBody); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index 049a216936f..90ee4431009 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.client; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.apache.http.Header; import org.apache.http.HttpHost; import org.apache.http.HttpResponse; @@ -95,7 +95,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { return null; } }); - int numHosts = RandomInts.randomIntBetween(getRandom(), 2, 5); + int numHosts = RandomNumbers.randomIntBetween(getRandom(), 2, 5); httpHosts = new HttpHost[numHosts]; for (int i = 0; i < numHosts; i++) { httpHosts[i] = new HttpHost("localhost", 9200 + i); @@ -105,7 +105,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { } public void testRoundRobinOkStatusCodes() throws IOException { - int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5); + int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { Set hostsSet = new HashSet<>(); Collections.addAll(hostsSet, httpHosts); @@ -121,7 +121,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { } public void testRoundRobinNoRetryErrors() throws IOException { - int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5); + int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); for (int i = 0; i < numIters; i++) { Set hostsSet = new HashSet<>(); Collections.addAll(hostsSet, httpHosts); @@ -198,7 +198,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); } - int numIters = RandomInts.randomIntBetween(getRandom(), 2, 5); + int numIters = RandomNumbers.randomIntBetween(getRandom(), 2, 5); for (int i = 1; i <= numIters; i++) { //check that one different host is resurrected at each new attempt Set hostsSet = new HashSet<>(); @@ -228,7 +228,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { if (getRandom().nextBoolean()) { //mark one host back alive through a successful request and check that all requests after that are sent to it HttpHost selectedHost = null; - int iters = RandomInts.randomIntBetween(getRandom(), 2, 10); + int 
iters = RandomNumbers.randomIntBetween(getRandom(), 2, 10); for (int y = 0; y < iters; y++) { int statusCode = randomErrorNoRetryStatusCode(getRandom()); Response response; @@ -269,7 +269,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { } private static String randomErrorRetryEndpoint() { - switch(RandomInts.randomIntBetween(getRandom(), 0, 3)) { + switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) { case 0: return "/" + randomErrorRetryStatusCode(getRandom()); case 1: diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java index a926cabb87d..aeb0620134b 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.client.sniff; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.fasterxml.jackson.core.JsonFactory; @@ -69,7 +69,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { @Before public void startHttpServer() throws IOException { - this.sniffRequestTimeout = RandomInts.randomIntBetween(getRandom(), 1000, 10000); + this.sniffRequestTimeout = RandomNumbers.randomIntBetween(getRandom(), 1000, 10000); this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchHostsSniffer.Scheme.values()); if (rarely()) { this.sniffResponse = SniffResponse.buildFailure(); @@ -101,7 +101,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { assertEquals(e.getMessage(), "scheme cannot be null"); } try { - new ElasticsearchHostsSniffer(restClient, RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0), + new ElasticsearchHostsSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0), ElasticsearchHostsSniffer.Scheme.HTTP); fail("should have failed"); } catch (IllegalArgumentException e) { @@ -175,7 +175,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { } private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme scheme) throws IOException { - int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5); + int numNodes = RandomNumbers.randomIntBetween(getRandom(), 1, 5); List hosts = new ArrayList<>(numNodes); JsonFactory jsonFactory = new JsonFactory(); StringWriter writer = new StringWriter(); @@ -205,7 +205,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { boolean isHttpEnabled = rarely() == false; if (isHttpEnabled) { String host = "host" + i; - int port = RandomInts.randomIntBetween(getRandom(), 9200, 9299); + int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299); HttpHost httpHost = new HttpHost(host, port, scheme.toString()); hosts.add(httpHost); generator.writeObjectFieldStart("http"); @@ -228,7 +228,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { } if (getRandom().nextBoolean()) { String[] roles = {"master", "data", "ingest"}; - int numRoles = RandomInts.randomIntBetween(getRandom(), 0, 3); + int numRoles = RandomNumbers.randomIntBetween(getRandom(), 0, 3); Set nodeRoles = new 
HashSet<>(numRoles); for (int j = 0; j < numRoles; j++) { String role; @@ -242,7 +242,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { } generator.writeEndArray(); } - int numAttributes = RandomInts.randomIntBetween(getRandom(), 0, 3); + int numAttributes = RandomNumbers.randomIntBetween(getRandom(), 0, 3); Map attributes = new HashMap<>(numAttributes); for (int j = 0; j < numAttributes; j++) { attributes.put("attr" + j, "value" + j); @@ -291,6 +291,6 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { } private static int randomErrorResponseCode() { - return RandomInts.randomIntBetween(getRandom(), 400, 599); + return RandomNumbers.randomIntBetween(getRandom(), 400, 599); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index b0c387d733a..9a7359e9c72 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.client.sniff; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.apache.http.HttpHost; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientTestCase; @@ -31,7 +31,7 @@ import static org.junit.Assert.fail; public class SnifferBuilderTests extends RestClientTestCase { public void testBuild() throws Exception { - int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5); + int numNodes = RandomNumbers.randomIntBetween(getRandom(), 1, 5); HttpHost[] hosts = new HttpHost[numNodes]; for (int i = 0; i < numNodes; i++) { hosts[i] = new HttpHost("localhost", 9200 + i); @@ -46,14 +46,14 @@ public class SnifferBuilderTests extends RestClientTestCase { } try { - Sniffer.builder(client).setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0)); + Sniffer.builder(client).setSniffIntervalMillis(RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { assertEquals("sniffIntervalMillis must be greater than 0", e.getMessage()); } try { - Sniffer.builder(client).setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0)); + Sniffer.builder(client).setSniffAfterFailureDelayMillis(RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { assertEquals("sniffAfterFailureDelayMillis must be greater than 0", e.getMessage()); @@ -74,10 +74,10 @@ public class SnifferBuilderTests extends RestClientTestCase { SnifferBuilder builder = Sniffer.builder(client); if (getRandom().nextBoolean()) { - builder.setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); + builder.setSniffIntervalMillis(RandomNumbers.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); } if (getRandom().nextBoolean()) { - builder.setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); + builder.setSniffAfterFailureDelayMillis(RandomNumbers.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); } if (getRandom().nextBoolean()) { builder.setHostsSniffer(new MockHostsSniffer()); diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java 
b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index b6f0020fb5b..ac9770f2bc8 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.index.Term; +import org.apache.lucene.queryparser.analyzing.AnalyzingQueryParser; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -34,6 +35,7 @@ import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.common.lucene.search.Queries; @@ -42,6 +44,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.LegacyDateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.StringFieldType; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.QueryParsers; @@ -63,7 +66,7 @@ import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfN * Also breaks fields with [type].[name] into a boolean query that must include the type * as well as the query on the name. */ -public class MapperQueryParser extends QueryParser { +public class MapperQueryParser extends AnalyzingQueryParser { public static final Map FIELD_QUERY_EXTENSIONS; @@ -99,11 +102,11 @@ public class MapperQueryParser extends QueryParser { setAutoGeneratePhraseQueries(settings.autoGeneratePhraseQueries()); setMaxDeterminizedStates(settings.maxDeterminizedStates()); setAllowLeadingWildcard(settings.allowLeadingWildcard()); - setLowercaseExpandedTerms(settings.lowercaseExpandedTerms()); + setLowercaseExpandedTerms(false); setPhraseSlop(settings.phraseSlop()); setDefaultOperator(settings.defaultOperator()); setFuzzyPrefixLength(settings.fuzzyPrefixLength()); - setLocale(settings.locale()); + setSplitOnWhitespace(settings.splitOnWhitespace()); } /** @@ -329,21 +332,20 @@ public class MapperQueryParser extends QueryParser { boolean startInclusive, boolean endInclusive, QueryShardContext context) { currentFieldType = context.fieldMapper(field); if (currentFieldType != null) { - if (lowercaseExpandedTerms && currentFieldType.tokenized()) { - part1 = part1 == null ? null : part1.toLowerCase(locale); - part2 = part2 == null ? null : part2.toLowerCase(locale); - } - try { + BytesRef part1Binary = part1 == null ? null : getAnalyzer().normalize(field, part1); + BytesRef part2Binary = part2 == null ? 
null : getAnalyzer().normalize(field, part2); Query rangeQuery; if (currentFieldType instanceof LegacyDateFieldMapper.DateFieldType && settings.timeZone() != null) { LegacyDateFieldMapper.DateFieldType dateFieldType = (LegacyDateFieldMapper.DateFieldType) this.currentFieldType; - rangeQuery = dateFieldType.rangeQuery(part1, part2, startInclusive, endInclusive, settings.timeZone(), null, context); + rangeQuery = dateFieldType.rangeQuery(part1Binary, part2Binary, + startInclusive, endInclusive, settings.timeZone(), null, context); } else if (currentFieldType instanceof DateFieldMapper.DateFieldType && settings.timeZone() != null) { DateFieldMapper.DateFieldType dateFieldType = (DateFieldMapper.DateFieldType) this.currentFieldType; - rangeQuery = dateFieldType.rangeQuery(part1, part2, startInclusive, endInclusive, settings.timeZone(), null, context); + rangeQuery = dateFieldType.rangeQuery(part1Binary, part2Binary, + startInclusive, endInclusive, settings.timeZone(), null, context); } else { - rangeQuery = currentFieldType.rangeQuery(part1, part2, startInclusive, endInclusive, context); + rangeQuery = currentFieldType.rangeQuery(part1Binary, part2Binary, startInclusive, endInclusive, context); } return rangeQuery; } catch (RuntimeException e) { @@ -357,9 +359,6 @@ public class MapperQueryParser extends QueryParser { } protected Query getFuzzyQuery(String field, String termStr, String minSimilarity) throws ParseException { - if (lowercaseExpandedTerms) { - termStr = termStr.toLowerCase(locale); - } Collection fields = extractMultiFields(field); if (fields != null) { if (fields.size() == 1) { @@ -398,8 +397,9 @@ public class MapperQueryParser extends QueryParser { currentFieldType = context.fieldMapper(field); if (currentFieldType != null) { try { - return currentFieldType.fuzzyQuery(termStr, Fuzziness.build(minSimilarity), - fuzzyPrefixLength, settings.fuzzyMaxExpansions(), FuzzyQuery.defaultTranspositions); + BytesRef term = termStr == null ? 
null : getAnalyzer().normalize(field, termStr); + return currentFieldType.fuzzyQuery(term, Fuzziness.build(minSimilarity), + getFuzzyPrefixLength(), settings.fuzzyMaxExpansions(), FuzzyQuery.defaultTranspositions); } catch (RuntimeException e) { if (settings.lenient()) { return null; @@ -422,9 +422,6 @@ public class MapperQueryParser extends QueryParser { @Override protected Query getPrefixQuery(String field, String termStr) throws ParseException { - if (lowercaseExpandedTerms) { - termStr = termStr.toLowerCase(locale); - } Collection fields = extractMultiFields(field); if (fields != null) { if (fields.size() == 1) { @@ -470,8 +467,8 @@ public class MapperQueryParser extends QueryParser { setAnalyzer(context.getSearchAnalyzer(currentFieldType)); } Query query = null; - if (currentFieldType.tokenized() == false) { - query = currentFieldType.prefixQuery(termStr, multiTermRewriteMethod, context); + if (currentFieldType instanceof StringFieldType == false) { + query = currentFieldType.prefixQuery(termStr, getMultiTermRewriteMethod(), context); } if (query == null) { query = getPossiblyAnalyzedPrefixQuery(currentFieldType.name(), termStr); @@ -589,9 +586,6 @@ public class MapperQueryParser extends QueryParser { return FIELD_QUERY_EXTENSIONS.get(ExistsFieldQueryExtension.NAME).query(context, actualField); } } - if (lowercaseExpandedTerms) { - termStr = termStr.toLowerCase(locale); - } Collection fields = extractMultiFields(field); if (fields != null) { if (fields.size() == 1) { @@ -638,9 +632,8 @@ public class MapperQueryParser extends QueryParser { setAnalyzer(context.getSearchAnalyzer(currentFieldType)); } indexedNameField = currentFieldType.name(); - return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr); } - return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr); + return super.getWildcardQuery(indexedNameField, termStr); } catch (RuntimeException e) { if (settings.lenient()) { return null; @@ -651,75 +644,8 @@ public class MapperQueryParser extends QueryParser { } } - private Query getPossiblyAnalyzedWildcardQuery(String field, String termStr) throws ParseException { - if (!settings.analyzeWildcard()) { - return super.getWildcardQuery(field, termStr); - } - boolean isWithinToken = (!termStr.startsWith("?") && !termStr.startsWith("*")); - StringBuilder aggStr = new StringBuilder(); - StringBuilder tmp = new StringBuilder(); - for (int i = 0; i < termStr.length(); i++) { - char c = termStr.charAt(i); - if (c == '?' 
|| c == '*') { - if (isWithinToken) { - try (TokenStream source = getAnalyzer().tokenStream(field, tmp.toString())) { - source.reset(); - CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class); - if (source.incrementToken()) { - String term = termAtt.toString(); - if (term.length() == 0) { - // no tokens, just use what we have now - aggStr.append(tmp); - } else { - aggStr.append(term); - } - } else { - // no tokens, just use what we have now - aggStr.append(tmp); - } - } catch (IOException e) { - aggStr.append(tmp); - } - tmp.setLength(0); - } - isWithinToken = false; - aggStr.append(c); - } else { - tmp.append(c); - isWithinToken = true; - } - } - if (isWithinToken) { - try { - try (TokenStream source = getAnalyzer().tokenStream(field, tmp.toString())) { - source.reset(); - CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class); - if (source.incrementToken()) { - String term = termAtt.toString(); - if (term.length() == 0) { - // no tokens, just use what we have now - aggStr.append(tmp); - } else { - aggStr.append(term); - } - } else { - // no tokens, just use what we have now - aggStr.append(tmp); - } - } - } catch (IOException e) { - aggStr.append(tmp); - } - } - - return super.getWildcardQuery(field, aggStr.toString()); - } - @Override protected Query getRegexpQuery(String field, String termStr) throws ParseException { - if (lowercaseExpandedTerms) { - termStr = termStr.toLowerCase(locale); - } Collection fields = extractMultiFields(field); if (fields != null) { if (fields.size() == 1) { @@ -767,7 +693,7 @@ public class MapperQueryParser extends QueryParser { Query query = null; if (currentFieldType.tokenized() == false) { query = currentFieldType.regexpQuery(termStr, RegExp.ALL, - maxDeterminizedStates, multiTermRewriteMethod, context); + getMaxDeterminizedStates(), getMultiTermRewriteMethod(), context); } if (query == null) { query = super.getRegexpQuery(field, termStr); diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java b/core/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java index c1fc2ae556e..295c1ace4f6 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/QueryParserSettings.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.MultiTermQuery; import org.elasticsearch.common.unit.Fuzziness; import org.joda.time.DateTimeZone; -import java.util.Locale; import java.util.Map; /** @@ -53,12 +52,8 @@ public class QueryParserSettings { private boolean analyzeWildcard; - private boolean lowercaseExpandedTerms; - private boolean enablePositionIncrements; - private Locale locale; - private Fuzziness fuzziness; private int fuzzyPrefixLength; private int fuzzyMaxExpansions; @@ -79,6 +74,8 @@ public class QueryParserSettings { /** To limit effort spent determinizing regexp queries. 
*/ private int maxDeterminizedStates; + private boolean splitOnWhitespace; + public QueryParserSettings(String queryString) { this.queryString = queryString; } @@ -135,14 +132,6 @@ public class QueryParserSettings { this.allowLeadingWildcard = allowLeadingWildcard; } - public boolean lowercaseExpandedTerms() { - return lowercaseExpandedTerms; - } - - public void lowercaseExpandedTerms(boolean lowercaseExpandedTerms) { - this.lowercaseExpandedTerms = lowercaseExpandedTerms; - } - public boolean enablePositionIncrements() { return enablePositionIncrements; } @@ -267,14 +256,6 @@ public class QueryParserSettings { this.useDisMax = useDisMax; } - public void locale(Locale locale) { - this.locale = locale; - } - - public Locale locale() { - return this.locale; - } - public void timeZone(DateTimeZone timeZone) { this.timeZone = timeZone; } @@ -290,4 +271,12 @@ public class QueryParserSettings { public Fuzziness fuzziness() { return fuzziness; } + + public void splitOnWhitespace(boolean value) { + this.splitOnWhitespace = value; + } + + public boolean splitOnWhitespace() { + return splitOnWhitespace; + } } diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java index 9fd978feb4f..eb33dbe4b18 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -19,7 +19,6 @@ package org.elasticsearch; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.common.io.stream.StreamInput; @@ -488,7 +487,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class, org.elasticsearch.snapshots.SnapshotCreationException::new, 27), DELETE_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.DeleteFailedEngineException.class, - org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28), + org.elasticsearch.index.engine.DeleteFailedEngineException::new, 28),// deprecated in 6.0, remove in 7.0 DOCUMENT_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentMissingException.class, org.elasticsearch.index.engine.DocumentMissingException::new, 29), SNAPSHOT_EXCEPTION(org.elasticsearch.snapshots.SnapshotException.class, @@ -582,7 +581,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte ROUTING_MISSING_EXCEPTION(org.elasticsearch.action.RoutingMissingException.class, org.elasticsearch.action.RoutingMissingException::new, 79), INDEX_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.IndexFailedEngineException.class, - org.elasticsearch.index.engine.IndexFailedEngineException::new, 80), + org.elasticsearch.index.engine.IndexFailedEngineException::new, 80), // deprecated in 6.0, remove in 7.0 INDEX_SHARD_RESTORE_FAILED_EXCEPTION(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class, org.elasticsearch.index.snapshots.IndexShardRestoreFailedException::new, 81), REPOSITORY_EXCEPTION(org.elasticsearch.repositories.RepositoryException.class, diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 0d8c3b72672..e9e950ce80a 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ 
-89,8 +89,10 @@ public class Version { public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final int V_5_0_0_rc1_ID = 5000051; public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); + public static final int V_5_0_0_ID = 5000099; + public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); public static final int V_6_0_0_alpha1_ID = 6000001; - public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0); + public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_3_0); public static final Version CURRENT = V_6_0_0_alpha1; /* NOTE: don't add unreleased version to this list except of the version assigned to CURRENT. @@ -115,6 +117,8 @@ public class Version { switch (id) { case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; + case V_5_0_0_ID: + return V_5_0_0; case V_5_0_0_rc1_ID: return V_5_0_0_rc1; case V_5_0_0_beta1_ID: diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java index ae4a59fa83f..bb1afe5e19e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import java.io.IOException; import java.util.ArrayList; @@ -59,7 +58,8 @@ public class PendingClusterTasksResponse extends ActionResponse implements Itera return pendingTasks.iterator(); } - public String prettyPrint() { + @Override + public String toString() { StringBuilder sb = new StringBuilder(); sb.append("tasks: (").append(pendingTasks.size()).append("):\n"); for (PendingClusterTask pendingClusterTask : this) { @@ -68,19 +68,6 @@ public class PendingClusterTasksResponse extends ActionResponse implements Itera return sb.toString(); } - @Override - public String toString() { - try { - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); - builder.startObject(); - toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - return builder.string(); - } catch (IOException e) { - return "{ \"error\" : \"" + e.getMessage() + "\"}"; - } - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startArray(Fields.TASKS); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index c7cfd6330cb..01528c7d228 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -21,19 +21,15 @@ package org.elasticsearch.action.bulk; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; -import 
org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; +import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; @@ -51,10 +47,12 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineClosedException; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequestOptions; @@ -62,11 +60,15 @@ import org.elasticsearch.transport.TransportService; import java.util.Map; +import static org.elasticsearch.action.delete.TransportDeleteAction.executeDeleteRequestOnPrimary; +import static org.elasticsearch.action.delete.TransportDeleteAction.executeDeleteRequestOnReplica; +import static org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnPrimary; +import static org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnReplica; import static org.elasticsearch.action.support.replication.ReplicationOperation.ignoreReplicaException; import static org.elasticsearch.action.support.replication.ReplicationOperation.isConflictException; /** Performs shard-level bulk (index, delete or update) operations */ -public class TransportShardBulkAction extends TransportWriteAction { +public class TransportShardBulkAction extends TransportWriteAction { public static final String ACTION_NAME = BulkAction.NAME + "[s]"; @@ -80,7 +82,7 @@ public class TransportShardBulkAction extends TransportWriteAction onPrimaryShard(BulkShardRequest request, IndexShard primary) throws Exception { + protected WritePrimaryResult shardOperationOnPrimary(BulkShardRequest request, IndexShard primary) throws Exception { final IndexMetaData metaData = primary.indexSettings().getIndexMetaData(); long[] preVersions = new long[request.items().length]; @@ -118,30 +120,86 @@ public class TransportShardBulkAction extends TransportWriteAction(response, location); + return new WritePrimaryResult(request, response, location, null, primary); } /** Executes bulk item requests and handles request execution exceptions */ - private Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexShard indexShard, + private Translog.Location executeBulkItemRequest(IndexMetaData metaData, 
IndexShard primary, BulkShardRequest request, long[] preVersions, VersionType[] preVersionTypes, - Translog.Location location, int requestIndex) { - preVersions[requestIndex] = request.items()[requestIndex].request().version(); - preVersionTypes[requestIndex] = request.items()[requestIndex].request().versionType(); - DocWriteRequest.OpType opType = request.items()[requestIndex].request().opType(); + Translog.Location location, int requestIndex) throws Exception { + final DocWriteRequest itemRequest = request.items()[requestIndex].request(); + preVersions[requestIndex] = itemRequest.version(); + preVersionTypes[requestIndex] = itemRequest.versionType(); + DocWriteRequest.OpType opType = itemRequest.opType(); try { - WriteResult writeResult = innerExecuteBulkItemRequest(metaData, indexShard, - request, requestIndex); - if (writeResult.getLocation() != null) { - location = locationToSync(location, writeResult.getLocation()); - } else { - assert writeResult.getResponse().getResult() == DocWriteResponse.Result.NOOP - : "only noop operation can have null next operation"; + // execute item request + final Engine.Result operationResult; + final DocWriteResponse response; + final BulkItemRequest replicaRequest; + switch (itemRequest.opType()) { + case CREATE: + case INDEX: + final IndexRequest indexRequest = (IndexRequest) itemRequest; + Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction); + operationResult = indexResult; + response = indexResult.hasFailure() ? null + : new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), + indexResult.getVersion(), indexResult.isCreated()); + replicaRequest = request.items()[requestIndex]; + break; + case UPDATE: + UpdateResultHolder updateResultHolder = executeUpdateRequest(((UpdateRequest) itemRequest), + primary, metaData, request, requestIndex); + operationResult = updateResultHolder.operationResult; + response = updateResultHolder.response; + replicaRequest = updateResultHolder.replicaRequest; + break; + case DELETE: + final DeleteRequest deleteRequest = (DeleteRequest) itemRequest; + Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary); + operationResult = deleteResult; + response = deleteResult.hasFailure() ? 
null : + new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), + deleteResult.getVersion(), deleteResult.isFound()); + replicaRequest = request.items()[requestIndex]; + break; + default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found"); } // update the bulk item request because update request execution can mutate the bulk item request - BulkItemRequest item = request.items()[requestIndex]; - // add the response - setResponse(item, new BulkItemResponse(item.id(), opType, writeResult.getResponse())); + request.items()[requestIndex] = replicaRequest; + if (operationResult == null) { // in case of noop update operation + assert response.getResult() == DocWriteResponse.Result.NOOP + : "only noop update can have null operation"; + replicaRequest.setIgnoreOnReplica(); + replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response)); + } else if (operationResult.hasFailure() == false) { + location = locationToSync(location, operationResult.getTranslogLocation()); + BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response); + replicaRequest.setPrimaryResponse(primaryResponse); + // set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though. + primaryResponse.getResponse().setShardInfo(new ShardInfo()); + } else { + DocWriteRequest docWriteRequest = replicaRequest.request(); + Exception failure = operationResult.getFailure(); + if (isConflictException(failure)) { + logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); + } else { + logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); + } + // if its a conflict failure, and we already executed the request on a primary (and we execute it + // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) + // then just use the response we got from the successful execution + if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) { + replicaRequest.setIgnoreOnReplica(); + replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(), + new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure))); + } + } + assert replicaRequest.getPrimaryResponse() != null; + assert preVersionTypes[requestIndex] != null; } catch (Exception e) { // rethrow the failure if we are going to retry on primary and let parent failure to handle it if (retryPrimaryException(e)) { @@ -151,147 +209,165 @@ public class TransportShardBulkAction extends TransportWriteAction) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - request.shardId(), docWriteRequest.opType().getLowercase(), request), e); - } else { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", - request.shardId(), docWriteRequest.opType().getLowercase(), request), e); - } - // if its a conflict failure, and we already executed the request on a primary (and we execute it - // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) - // then just use the response we got from the successful execution - if (item.getPrimaryResponse() 
!= null && isConflictException(e)) { - setResponse(item, item.getPrimaryResponse()); - } else { - setResponse(item, new BulkItemResponse(item.id(), docWriteRequest.opType(), - new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), e))); } + throw e; } - assert request.items()[requestIndex].getPrimaryResponse() != null; - assert preVersionTypes[requestIndex] != null; return location; } - private WriteResult innerExecuteBulkItemRequest(IndexMetaData metaData, IndexShard indexShard, - BulkShardRequest request, int requestIndex) throws Exception { - DocWriteRequest itemRequest = request.items()[requestIndex].request(); - switch (itemRequest.opType()) { - case CREATE: - case INDEX: - return TransportIndexAction.executeIndexRequestOnPrimary(((IndexRequest) itemRequest), indexShard, mappingUpdatedAction); - case UPDATE: - int maxAttempts = ((UpdateRequest) itemRequest).retryOnConflict(); - for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) { - try { - return shardUpdateOperation(metaData, indexShard, request, requestIndex, ((UpdateRequest) itemRequest)); - } catch (Exception e) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - if (attemptCount == maxAttempts // bubble up exception when we run out of attempts - || (cause instanceof VersionConflictEngineException) == false) { // or when exception is not a version conflict - throw e; - } - } - } - throw new IllegalStateException("version conflict exception should bubble up on last attempt"); - case DELETE: - return TransportDeleteAction.executeDeleteRequestOnPrimary(((DeleteRequest) itemRequest), indexShard); - default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found"); - } - } + private static class UpdateResultHolder { + final BulkItemRequest replicaRequest; + final Engine.Result operationResult; + final DocWriteResponse response; - private void setResponse(BulkItemRequest request, BulkItemResponse response) { - request.setPrimaryResponse(response); - if (response.isFailed()) { - request.setIgnoreOnReplica(); - } else { - // Set the ShardInfo to 0 so we can safely send it to the replicas. We won't use it in the real response though. 
- response.getResponse().setShardInfo(new ShardInfo()); + private UpdateResultHolder(BulkItemRequest replicaRequest, Engine.Result operationResult, + DocWriteResponse response) { + this.replicaRequest = replicaRequest; + this.operationResult = operationResult; + this.response = response; } } /** - * Executes update request, doing a get and translating update to a index or delete operation - * NOTE: all operations except NOOP, reassigns the bulk item request - */ - private WriteResult shardUpdateOperation(IndexMetaData metaData, IndexShard indexShard, - BulkShardRequest request, - int requestIndex, UpdateRequest updateRequest) - throws Exception { - // Todo: capture read version conflicts, missing documents and malformed script errors in the write result due to get request - UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard, threadPool::estimatedTimeInMillis); - switch (translate.getResponseResult()) { - case CREATED: - case UPDATED: - IndexRequest indexRequest = translate.action(); - MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); - indexRequest.process(mappingMd, allowIdGeneration, request.index()); - WriteResult writeResult = TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction); - BytesReference indexSourceAsBytes = indexRequest.source(); - IndexResponse indexResponse = writeResult.getResponse(); - UpdateResponse update = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult()); - if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) || - (updateRequest.fields() != null && updateRequest.fields().length > 0)) { - Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); - update.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); + * Executes update request, delegating to a index or delete operation after translation, + * handles retries on version conflict and constructs update response + * NOTE: reassigns bulk item request at requestIndex for replicas to + * execute translated update request (NOOP update is an exception). 
NOOP updates are + * indicated by returning a null operation in {@link UpdateResultHolder} + * */ + private UpdateResultHolder executeUpdateRequest(UpdateRequest updateRequest, IndexShard primary, + IndexMetaData metaData, BulkShardRequest request, + int requestIndex) throws Exception { + Engine.Result updateOperationResult = null; + UpdateResponse updateResponse = null; + BulkItemRequest replicaRequest = request.items()[requestIndex]; + int maxAttempts = updateRequest.retryOnConflict(); + for (int attemptCount = 0; attemptCount <= maxAttempts; attemptCount++) { + final UpdateHelper.Result translate; + // translate update request + try { + translate = updateHelper.prepare(updateRequest, primary, threadPool::estimatedTimeInMillis); + } catch (Exception failure) { + // we may fail translating a update to index or delete operation + // we use index result to communicate failure while translating update request + updateOperationResult = new Engine.IndexResult(failure, updateRequest.version()); + break; // out of retry loop + } + // execute translated update request + switch (translate.getResponseResult()) { + case CREATED: + case UPDATED: + IndexRequest indexRequest = translate.action(); + MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); + indexRequest.process(mappingMd, allowIdGeneration, request.index()); + updateOperationResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction); + break; + case DELETED: + updateOperationResult = executeDeleteRequestOnPrimary(translate.action(), primary); + break; + case NOOP: + primary.noopUpdate(updateRequest.type()); + break; + default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); + } + if (updateOperationResult == null) { + // this is a noop operation + updateResponse = translate.action(); + break; // out of retry loop + } else if (updateOperationResult.hasFailure() == false) { + // enrich update response and + // set translated update (index/delete) request for replica execution in bulk items + switch (updateOperationResult.getOperationType()) { + case INDEX: + IndexRequest updateIndexRequest = translate.action(); + final IndexResponse indexResponse = new IndexResponse(primary.shardId(), + updateIndexRequest.type(), updateIndexRequest.id(), + updateOperationResult.getVersion(), ((Engine.IndexResult) updateOperationResult).isCreated()); + BytesReference indexSourceAsBytes = updateIndexRequest.source(); + updateResponse = new UpdateResponse(indexResponse.getShardInfo(), + indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), + indexResponse.getVersion(), indexResponse.getResult()); + if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) || + (updateRequest.fields() != null && updateRequest.fields().length > 0)) { + Tuple> sourceAndContent = + XContentHelper.convertToMap(indexSourceAsBytes, true); + updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), + indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); + } + // set translated request as replica request + replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateIndexRequest); + break; + case DELETE: + DeleteRequest updateDeleteRequest = translate.action(); + DeleteResponse deleteResponse = new DeleteResponse(primary.shardId(), + updateDeleteRequest.type(), updateDeleteRequest.id(), + updateOperationResult.getVersion(), ((Engine.DeleteResult) 
updateOperationResult).isFound()); + updateResponse = new UpdateResponse(deleteResponse.getShardInfo(), + deleteResponse.getShardId(), deleteResponse.getType(), deleteResponse.getId(), + deleteResponse.getVersion(), deleteResponse.getResult()); + updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, + request.index(), deleteResponse.getVersion(), translate.updatedSourceAsMap(), + translate.updateSourceContentType(), null)); + // set translated request as replica request + replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateDeleteRequest); + break; } - // Replace the update request to the translated index request to execute on the replica. - request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest); - return new WriteResult<>(update, writeResult.getLocation()); - case DELETED: - DeleteRequest deleteRequest = translate.action(); - WriteResult deleteResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); - DeleteResponse response = deleteResult.getResponse(); - UpdateResponse deleteUpdateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult()); - deleteUpdateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), translate.updatedSourceAsMap(), translate.updateSourceContentType(), null)); - // Replace the update request to the translated delete request to execute on the replica. - request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest); - return new WriteResult<>(deleteUpdateResponse, deleteResult.getLocation()); - case NOOP: - BulkItemRequest item = request.items()[requestIndex]; - indexShard.noopUpdate(updateRequest.type()); - item.setIgnoreOnReplica(); // no need to go to the replica - return new WriteResult<>(translate.action(), null); - default: throw new IllegalStateException("Illegal update operation " + translate.getResponseResult()); + // successful operation + break; // out of retry loop + } else if (updateOperationResult.getFailure() instanceof VersionConflictEngineException == false) { + // not a version conflict exception + break; // out of retry loop + } } + return new UpdateResultHolder(replicaRequest, updateOperationResult, updateResponse); } @Override - protected Location onReplicaShard(BulkShardRequest request, IndexShard indexShard) { + protected WriteReplicaResult shardOperationOnReplica(BulkShardRequest request, IndexShard replica) throws Exception { Translog.Location location = null; for (int i = 0; i < request.items().length; i++) { BulkItemRequest item = request.items()[i]; - if (item == null || item.isIgnoreOnReplica()) { - continue; - } - DocWriteRequest docWriteRequest = item.request(); - final Engine.Operation operation; - try { - switch (docWriteRequest.opType()) { - case CREATE: - case INDEX: - operation = TransportIndexAction.executeIndexRequestOnReplica(((IndexRequest) docWriteRequest), indexShard); - break; - case DELETE: - operation = TransportDeleteAction.executeDeleteRequestOnReplica(((DeleteRequest) docWriteRequest), indexShard); - break; - default: throw new IllegalStateException("Unexpected request operation type on replica: " - + docWriteRequest.opType().getLowercase()); - } - location = locationToSync(location, operation.getTranslogLocation()); - } catch (Exception e) { - // if its not an ignore replica failure, we need to make sure 
to bubble up the failure - // so we will fail the shard - if (!ignoreReplicaException(e)) { - throw e; + if (item.isIgnoreOnReplica() == false) { + DocWriteRequest docWriteRequest = item.request(); + final Engine.Result operationResult; + try { + switch (docWriteRequest.opType()) { + case CREATE: + case INDEX: + operationResult = executeIndexRequestOnReplica(((IndexRequest) docWriteRequest), replica); + break; + case DELETE: + operationResult = executeDeleteRequestOnReplica(((DeleteRequest) docWriteRequest), replica); + break; + default: + throw new IllegalStateException("Unexpected request operation type on replica: " + + docWriteRequest.opType().getLowercase()); + } + if (operationResult.hasFailure()) { + // check if any transient write operation failures should be bubbled up + Exception failure = operationResult.getFailure(); + assert failure instanceof VersionConflictEngineException + || failure instanceof MapperParsingException + || failure instanceof EngineClosedException + || failure instanceof IndexShardClosedException + : "expected any one of [version conflict, mapper parsing, engine closed, index shard closed]" + + " failures. got " + failure; + if (!ignoreReplicaException(failure)) { + throw failure; + } + } else { + location = locationToSync(location, operationResult.getTranslogLocation()); + } + } catch (Exception e) { + // if it's not an ignorable replica failure, we need to make sure to bubble up the failure + // so we will fail the shard + if (!ignoreReplicaException(e)) { + throw e; + } } } } - return location; + return new WriteReplicaResult(request, location, null, replica); } private Translog.Location locationToSync(Translog.Location current, Translog.Location next) { diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 6f3d27ea369..e017bb9a75d 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -39,7 +39,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; @@ -49,7 +48,7 @@ import org.elasticsearch.transport.TransportService; /** * Performs the delete operation.
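The TransportDeleteAction and TransportIndexAction hunks that follow move from thrown exceptions to failure-carrying result objects (Engine.DeleteResult and Engine.IndexResult), so callers branch on hasFailure() instead of wrapping every engine call in try/catch. A rough sketch of that pattern, with an illustrative OperationResult class rather than the real Engine.Result hierarchy:

    // Minimal sketch of a result object that carries either a success value or a failure.
    // A null failure means the write succeeded and version/translog data is meaningful.
    class OperationResult {
        private final long version;
        private final Exception failure; // null on success

        OperationResult(long version) {
            this.version = version;
            this.failure = null;
        }

        OperationResult(Exception failure, long version) {
            this.version = version;
            this.failure = failure;
        }

        boolean hasFailure() { return failure != null; }
        Exception getFailure() { return failure; }
        long getVersion() { return version; }
    }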
*/ -public class TransportDeleteAction extends TransportWriteAction { +public class TransportDeleteAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; @@ -61,7 +60,7 @@ public class TransportDeleteAction extends TransportWriteAction listener) { ClusterState state = clusterService.state(); if (autoCreateIndex.shouldAutoCreate(request.index(), state)) { - createIndexAction.execute(task, new CreateIndexRequest().index(request.index()).cause("auto(delete api)").masterNodeTimeout(request.timeout()), new ActionListener() { + CreateIndexRequest createIndexRequest = new CreateIndexRequest() + .index(request.index()) + .cause("auto(delete api)") + .masterNodeTimeout(request.timeout()); + createIndexAction.execute(task, createIndexRequest, new ActionListener() { @Override public void onResponse(CreateIndexResponse result) { innerExecute(task, request, listener); @@ -119,30 +122,33 @@ public class TransportDeleteAction extends TransportWriteAction onPrimaryShard(DeleteRequest request, IndexShard indexShard) { - return executeDeleteRequestOnPrimary(request, indexShard); + protected WritePrimaryResult shardOperationOnPrimary(DeleteRequest request, IndexShard primary) throws Exception { + final Engine.DeleteResult result = executeDeleteRequestOnPrimary(request, primary); + final DeleteResponse response = result.hasFailure() ? null : + new DeleteResponse(primary.shardId(), request.type(), request.id(), result.getVersion(), result.isFound()); + return new WritePrimaryResult(request, response, result.getTranslogLocation(), result.getFailure(), primary); } @Override - protected Location onReplicaShard(DeleteRequest request, IndexShard indexShard) { - return executeDeleteRequestOnReplica(request, indexShard).getTranslogLocation(); + protected WriteReplicaResult shardOperationOnReplica(DeleteRequest request, IndexShard replica) throws Exception { + final Engine.DeleteResult result = executeDeleteRequestOnReplica(request, replica); + return new WriteReplicaResult(request, result.getTranslogLocation(), result.getFailure(), replica); } - public static WriteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard indexShard) { - Engine.Delete delete = indexShard.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType()); - indexShard.delete(delete); - // update the request with the version so it will go to the replicas - request.versionType(delete.versionType().versionTypeForReplicationAndRecovery()); - request.version(delete.version()); - - assert request.versionType().validateVersionForWrites(request.version()); - DeleteResponse response = new DeleteResponse(indexShard.shardId(), request.type(), request.id(), delete.version(), delete.found()); - return new WriteResult<>(response, delete.getTranslogLocation()); + public static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) { + Engine.Delete delete = primary.prepareDeleteOnPrimary(request.type(), request.id(), request.version(), request.versionType()); + Engine.DeleteResult result = primary.delete(delete); + if (result.hasFailure() == false) { + // update the request with the version so it will go to the replicas + request.versionType(delete.versionType().versionTypeForReplicationAndRecovery()); + request.version(result.getVersion()); + assert request.versionType().validateVersionForWrites(request.version()); + } + return result; } - public static Engine.Delete 
executeDeleteRequestOnReplica(DeleteRequest request, IndexShard indexShard) { - Engine.Delete delete = indexShard.prepareDeleteOnReplica(request.type(), request.id(), request.version(), request.versionType()); - indexShard.delete(delete); - return delete; + public static Engine.DeleteResult executeDeleteRequestOnReplica(DeleteRequest request, IndexShard replica) { + Engine.Delete delete = replica.prepareDeleteOnReplica(request.type(), request.id(), request.version(), request.versionType()); + return replica.delete(delete); } } diff --git a/core/src/main/java/org/elasticsearch/action/get/GetRequest.java b/core/src/main/java/org/elasticsearch/action/get/GetRequest.java index 38dd10df963..93045182f4c 100644 --- a/core/src/main/java/org/elasticsearch/action/get/GetRequest.java +++ b/core/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -101,6 +101,9 @@ public class GetRequest extends SingleShardRequest implements Realti validationException = ValidateActions.addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); } + if (versionType == VersionType.FORCE) { + validationException = ValidateActions.addValidationError("version type [force] may no longer be used", validationException); + } return validationException; } diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index cc3fbb7906d..a6adaa12fef 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -39,11 +39,11 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; @@ -60,7 +60,7 @@ import org.elasticsearch.transport.TransportService; *
  • allowIdGeneration: If the id is set not, should it be generated. Defaults to true. * */ -public class TransportIndexAction extends TransportWriteAction { +public class TransportIndexAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final boolean allowIdGeneration; @@ -76,7 +76,7 @@ public class TransportIndexAction extends TransportWriteAction onPrimaryShard(IndexRequest request, IndexShard indexShard) throws Exception { - return executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction); + protected WritePrimaryResult shardOperationOnPrimary(IndexRequest request, IndexShard primary) throws Exception { + final Engine.IndexResult indexResult = executeIndexRequestOnPrimary(request, primary, mappingUpdatedAction); + final IndexResponse response = indexResult.hasFailure() ? null : + new IndexResponse(primary.shardId(), request.type(), request.id(), indexResult.getVersion(), + indexResult.isCreated()); + return new WritePrimaryResult(request, response, indexResult.getTranslogLocation(), indexResult.getFailure(), primary); } @Override - protected Location onReplicaShard(IndexRequest request, IndexShard indexShard) { - return executeIndexRequestOnReplica(request, indexShard).getTranslogLocation(); + protected WriteReplicaResult shardOperationOnReplica(IndexRequest request, IndexShard replica) throws Exception { + final Engine.IndexResult indexResult = executeIndexRequestOnReplica(request, replica); + return new WriteReplicaResult(request, indexResult.getTranslogLocation(), indexResult.getFailure(), replica); } /** * Execute the given {@link IndexRequest} on a replica shard, throwing a * {@link RetryOnReplicaException} if the operation needs to be re-tried. */ - public static Engine.Index executeIndexRequestOnReplica(IndexRequest request, IndexShard indexShard) { - final ShardId shardId = indexShard.shardId(); + public static Engine.IndexResult executeIndexRequestOnReplica(IndexRequest request, IndexShard replica) { + final ShardId shardId = replica.shardId(); SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source()) .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); - final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry()); + final Engine.Index operation; + try { + operation = replica.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry()); + } catch (MapperParsingException e) { + return new Engine.IndexResult(e, request.version()); + } Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); if (update != null) { throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); } - indexShard.index(operation); - return operation; + return replica.index(operation); } /** Utility method to prepare an index operation on primary shards */ - public static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard indexShard) { + static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) { SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source()) 
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); - return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry()); + return primary.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry()); } - public static WriteResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, + public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary, MappingUpdatedAction mappingUpdatedAction) throws Exception { - Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard); + Engine.Index operation; + try { + operation = prepareIndexOperationOnPrimary(request, primary); + } catch (MapperParsingException | IllegalArgumentException e) { + return new Engine.IndexResult(e, request.version()); + } Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); - final ShardId shardId = indexShard.shardId(); + final ShardId shardId = primary.shardId(); if (update != null) { - mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update); - operation = prepareIndexOperationOnPrimary(request, indexShard); + // can throw timeout exception when updating mappings or ISE for attempting to update default mappings + // which are bubbled up + try { + mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update); + } catch (IllegalArgumentException e) { + // throws IAE on conflicts merging dynamic mappings + return new Engine.IndexResult(e, request.version()); + } + try { + operation = prepareIndexOperationOnPrimary(request, primary); + } catch (MapperParsingException | IllegalArgumentException e) { + return new Engine.IndexResult(e, request.version()); + } update = operation.parsedDoc().dynamicMappingsUpdate(); if (update != null) { throw new ReplicationOperation.RetryOnPrimaryException(shardId, "Dynamic mappings are not available on the node that holds the primary yet"); } } - indexShard.index(operation); - - // update the version on request so it will happen on the replicas - final long version = operation.version(); - request.version(version); - request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); - - assert request.versionType().validateVersionForWrites(request.version()); - - IndexResponse response = new IndexResponse(shardId, request.type(), request.id(), request.version(), operation.isCreated()); - return new WriteResult<>(response, operation.getTranslogLocation()); + Engine.IndexResult result = primary.index(operation); + if (result.hasFailure() == false) { + // update the version on request so it will happen on the replicas + final long version = result.getVersion(); + request.version(version); + request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); + assert request.versionType().validateVersionForWrites(request.version()); + } + return result; } } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java index fc14e0de2df..90cbce135af 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java @@ -32,4 +32,12 @@ public class DeletePipelineRequestBuilder 
extends ActionRequestBuilder, Re listener = new TaskResultStoringActionListener<>(taskManager, task, listener); } - if (filters.length == 0) { - try { - doExecute(task, request, listener); - } catch(Exception e) { - logger.trace("Error during transport action execution.", e); - listener.onFailure(e); - } - } else { - RequestFilterChain requestFilterChain = new RequestFilterChain<>(this, logger); - requestFilterChain.proceed(task, actionName, request, listener); - } + RequestFilterChain requestFilterChain = new RequestFilterChain<>(this, logger); + requestFilterChain.proceed(task, actionName, request, listener); } protected void doExecute(Task task, Request request, ActionListener listener) { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index d541ef6a35c..c049336bafc 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -112,22 +113,24 @@ public class ReplicationOperation< pendingActions.incrementAndGet(); primaryResult = primary.perform(request); final ReplicaRequest replicaRequest = primaryResult.replicaRequest(); - assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; - if (logger.isTraceEnabled()) { - logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); + if (replicaRequest != null) { + assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; + if (logger.isTraceEnabled()) { + logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); + } + + // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics. + // we have to make sure that every operation indexed into the primary after recovery start will also be replicated + // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then. + ClusterState clusterState = clusterStateSupplier.get(); + final List shards = getShards(primaryId, clusterState); + Set inSyncAllocationIds = getInSyncAllocationIds(primaryId, clusterState); + + markUnavailableShardsAsStale(replicaRequest, inSyncAllocationIds, shards); + + performOnReplicas(replicaRequest, shards); } - // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics. - // we have to make sure that every operation indexed into the primary after recovery start will also be replicated - // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then. 
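The null-guard introduced in the ReplicationOperation hunk above means the replica fan-out only runs when the primary actually produced a replica request. A compact, hypothetical rendering of that control flow (PrimaryResultLike and ReplicationSketch are made-up names for illustration):

    interface PrimaryResultLike {
        Object replicaRequest(); // may be null, per the new @Nullable contract
    }

    class ReplicationSketch {
        void execute(PrimaryResultLike result) {
            Object replicaRequest = result.replicaRequest();
            if (replicaRequest != null) {
                // only now: fetch a fresh cluster state, mark unavailable shards as stale,
                // and send the request to each active replica (details elided)
            }
            // successful-shard accounting and pending-action bookkeeping run either way
        }
    }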
- ClusterState clusterState = clusterStateSupplier.get(); - final List shards = getShards(primaryId, clusterState); - Set inSyncAllocationIds = getInSyncAllocationIds(primaryId, clusterState); - - markUnavailableShardsAsStale(replicaRequest, inSyncAllocationIds, shards); - - performOnReplicas(replicaRequest, shards); - successfulShards.incrementAndGet(); decPendingAndFinishIfNeeded(); } @@ -419,7 +422,11 @@ public class ReplicationOperation< public interface PrimaryResult> { - R replicaRequest(); + /** + * @return null if no operation needs to be sent to a replica + * (for example when the operation failed on the primary due to a parsing exception) + */ + @Nullable R replicaRequest(); void setShardInfo(ReplicationResponse.ShardInfo shardInfo); } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java index e6ce0a5aad4..98556494191 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java @@ -24,7 +24,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -34,6 +33,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.Arrays; /** * Base class for write action responses. 
@@ -162,7 +162,11 @@ public class ReplicationResponse extends ActionResponse { @Override public String toString() { - return Strings.toString(this); + return "ShardInfo{" + + "total=" + total + + ", successful=" + successful + + ", failures=" + Arrays.toString(failures) + + '}'; } public static ShardInfo readShardInfo(StreamInput in) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 740a003ffa8..6c19e526427 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.action.admin.indices.flush.ShardFlushRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.TransportAction; @@ -178,7 +179,7 @@ public abstract class TransportReplicationAction< * @param shardRequest the request to the replica shard * @param replica the replica shard to perform the operation on */ - protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest, IndexShard replica); + protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest, IndexShard replica) throws Exception; /** * Cluster level block to check before request execution @@ -207,7 +208,7 @@ public abstract class TransportReplicationAction< protected boolean retryPrimaryException(final Throwable e) { return e.getClass() == ReplicationOperation.RetryOnPrimaryException.class - || TransportActions.isShardNotAvailableException(e); + || TransportActions.isShardNotAvailableException(e); } class OperationTransportHandler implements TransportRequestHandler { @@ -310,17 +311,10 @@ public abstract class TransportReplicationAction< final IndexMetaData indexMetaData = clusterService.state().getMetaData().index(request.shardId().getIndex()); final boolean executeOnReplicas = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings()); final ActionListener listener = createResponseListener(primaryShardReference); - createReplicatedOperation(request, new ActionListener() { - @Override - public void onResponse(PrimaryResult result) { - result.respond(listener); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }, primaryShardReference, executeOnReplicas).execute(); + createReplicatedOperation(request, + ActionListener.wrap(result -> result.respond(listener), listener::onFailure), + primaryShardReference, executeOnReplicas) + .execute(); } } catch (Exception e) { Releasables.closeWhileHandlingException(primaryShardReference); // release shard operation lock before responding to caller @@ -376,11 +370,24 @@ public abstract class TransportReplicationAction< protected class PrimaryResult implements ReplicationOperation.PrimaryResult { final ReplicaRequest replicaRequest; - final Response finalResponse; + final Response finalResponseIfSuccessful; + final Exception finalFailure; - public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponse) { + /** + * Result of 
executing a primary operation + * expects finalResponseIfSuccessful or finalFailure to be not-null + */ + public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponseIfSuccessful, Exception finalFailure) { + assert finalFailure != null ^ finalResponseIfSuccessful != null + : "either a response or a failure has to be not null, " + + "found [" + finalFailure + "] failure and ["+ finalResponseIfSuccessful + "] response"; this.replicaRequest = replicaRequest; - this.finalResponse = finalResponse; + this.finalResponseIfSuccessful = finalResponseIfSuccessful; + this.finalFailure = finalFailure; + } + + public PrimaryResult(ReplicaRequest replicaRequest, Response replicationResponse) { + this(replicaRequest, replicationResponse, null); } @Override @@ -390,22 +397,37 @@ public abstract class TransportReplicationAction< @Override public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) { - finalResponse.setShardInfo(shardInfo); + if (finalResponseIfSuccessful != null) { + finalResponseIfSuccessful.setShardInfo(shardInfo); + } } public void respond(ActionListener listener) { - listener.onResponse(finalResponse); + if (finalResponseIfSuccessful != null) { + listener.onResponse(finalResponseIfSuccessful); + } else { + listener.onFailure(finalFailure); + } } } protected class ReplicaResult { - /** - * Public constructor so subclasses can call it. - */ - public ReplicaResult() {} + final Exception finalFailure; + + public ReplicaResult(Exception finalFailure) { + this.finalFailure = finalFailure; + } + + public ReplicaResult() { + this(null); + } public void respond(ActionListener listener) { - listener.onResponse(TransportResponse.Empty.INSTANCE); + if (finalFailure == null) { + listener.onResponse(TransportResponse.Empty.INSTANCE); + } else { + listener.onFailure(finalFailure); + } } } @@ -481,6 +503,7 @@ public abstract class TransportReplicationAction< transportReplicaAction, request), e); + request.onRetry(); final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext(); observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override @@ -902,7 +925,9 @@ public abstract class TransportReplicationAction< @Override public PrimaryResult perform(Request request) throws Exception { PrimaryResult result = shardOperationOnPrimary(request, indexShard); - result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm()); + if (result.replicaRequest() != null) { + result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm()); + } return result; } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 05695b246ef..15f269c46f5 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -44,81 +44,63 @@ import java.util.function.Supplier; /** * Base class for transport actions that modify data in some shard like index, delete, and shardBulk. + * Allows performing async actions (e.g. 
refresh) after performing write operations on primary and replica shards */ public abstract class TransportWriteAction< Request extends ReplicatedWriteRequest, + ReplicaRequest extends ReplicatedWriteRequest, Response extends ReplicationResponse & WriteResponse - > extends TransportReplicationAction { + > extends TransportReplicationAction { protected TransportWriteAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, - String executor) { + Supplier replicaRequest, String executor) { super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, request, request, executor); + indexNameExpressionResolver, request, replicaRequest, executor); } /** - * Called on the primary with a reference to the {@linkplain IndexShard} to modify. - */ - protected abstract WriteResult onPrimaryShard(Request request, IndexShard indexShard) throws Exception; - - /** - * Called once per replica with a reference to the {@linkplain IndexShard} to modify. + * Called on the primary with a reference to the primary {@linkplain IndexShard} to modify. * - * @return the translog location of the {@linkplain IndexShard} after the write was completed or null if no write occurred + * @return the result of the operation on primary, including current translog location and operation response and failure + * async refresh is performed on the primary shard according to the Request refresh policy */ - protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard); - @Override - protected final WritePrimaryResult shardOperationOnPrimary(Request request, IndexShard primary) throws Exception { - WriteResult result = onPrimaryShard(request, primary); - return new WritePrimaryResult(request, result.getResponse(), result.getLocation(), primary); - } - - @Override - protected final WriteReplicaResult shardOperationOnReplica(Request request, IndexShard replica) { - Translog.Location location = onReplicaShard(request, replica); - return new WriteReplicaResult(replica, request, location); - } + protected abstract WritePrimaryResult shardOperationOnPrimary(Request request, IndexShard primary) throws Exception; /** - * Simple result from a write action. Write actions have static method to return these so they can integrate with bulk. + * Called once per replica with a reference to the replica {@linkplain IndexShard} to modify. + * + * @return the result of the operation on replica, including current translog location and operation response and failure + * async refresh is performed on the replica shard according to the ReplicaRequest refresh policy */ - public static class WriteResult { - private final Response response; - private final Translog.Location location; - - public WriteResult(Response response, @Nullable Location location) { - this.response = response; - this.location = location; - } - - public Response getResponse() { - return response; - } - - public Translog.Location getLocation() { - return location; - } - } + @Override + protected abstract WriteReplicaResult shardOperationOnReplica(ReplicaRequest request, IndexShard replica) throws Exception; /** * Result of taking the action on the primary. 
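The WritePrimaryResult below coordinates two asynchronous events before answering the caller: the replication listener arriving and the after-write actions (e.g. a refresh) finishing. A minimal sketch of that handshake under those assumptions, with illustrative names in place of the real listener and response types:

    import java.util.function.Consumer;

    // Sketch: the response is released only after BOTH the replication path has supplied
    // a listener AND the async after-write action (refresh/translog sync) has completed.
    class RespondWhenBothDone {
        private boolean finishedAsyncActions;
        private Consumer<String> listener;
        private final String response = "ok";

        synchronized void onListenerReady(Consumer<String> listener) {
            this.listener = listener;
            respondIfPossible();
        }

        synchronized void onAsyncActionsDone() {
            finishedAsyncActions = true;
            respondIfPossible();
        }

        // called from both paths; whichever event arrives second triggers the response
        private void respondIfPossible() {
            if (finishedAsyncActions && listener != null) {
                listener.accept(response);
            }
        }
    }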
*/ - class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult { + protected class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult { boolean finishedAsyncActions; ActionListener listener = null; - public WritePrimaryResult(Request request, Response finalResponse, - @Nullable Translog.Location location, - IndexShard indexShard) { - super(request, finalResponse); - /* - * We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the - * refresh in parallel on the primary and on the replica. - */ - new AsyncAfterWriteAction(indexShard, request, location, this, logger).run(); + public WritePrimaryResult(ReplicaRequest request, @Nullable Response finalResponse, + @Nullable Location location, @Nullable Exception operationFailure, + IndexShard primary) { + super(request, finalResponse, operationFailure); + assert location == null || operationFailure == null + : "expected either failure to be null or translog location to be null, " + + "but found: [" + location + "] translog location and [" + operationFailure + "] failure"; + if (operationFailure != null) { + this.finishedAsyncActions = true; + } else { + /* + * We call this before replication because this might wait for a refresh and that can take a while. + * This way we wait for the refresh in parallel on the primary and on the replica. + */ + new AsyncAfterWriteAction(primary, request, location, this, logger).run(); + } } @Override @@ -147,7 +129,7 @@ public abstract class TransportWriteAction< @Override public synchronized void onSuccess(boolean forcedRefresh) { - finalResponse.setForcedRefresh(forcedRefresh); + finalResponseIfSuccessful.setForcedRefresh(forcedRefresh); finishedAsyncActions = true; respondIfPossible(null); } @@ -156,12 +138,18 @@ public abstract class TransportWriteAction< /** * Result of taking the action on the replica. 
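WriteReplicaResult, shown next, applies the same short-circuit as the primary-side result: a failed operation skips the after-write actions entirely and reports the failure as-is. A sketch under that assumption, with hypothetical names:

    // Sketch: when the write already failed there is nothing to refresh or fsync,
    // so the result is immediately considered done and no async action is started.
    class ReplicaResultSketch {
        private final Exception operationFailure; // null on success
        private boolean finishedAsyncActions;

        ReplicaResultSketch(Exception operationFailure, Runnable asyncAfterWriteAction) {
            this.operationFailure = operationFailure;
            if (operationFailure != null) {
                this.finishedAsyncActions = true; // nothing further to wait for
            } else {
                asyncAfterWriteAction.run(); // kicks off refresh/translog-sync callbacks
            }
        }
    }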
*/ - class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult { + protected class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult { boolean finishedAsyncActions; private ActionListener listener; - public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location) { - new AsyncAfterWriteAction(indexShard, request, location, this, logger).run(); + public WriteReplicaResult(ReplicaRequest request, @Nullable Location location, + @Nullable Exception operationFailure, IndexShard replica) { + super(operationFailure); + if (operationFailure != null) { + this.finishedAsyncActions = true; + } else { + new AsyncAfterWriteAction(replica, request, location, this, logger).run(); + } } @Override diff --git a/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java index 9e0d1a94119..14abc77513a 100644 --- a/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java +++ b/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -563,6 +563,11 @@ public interface ClusterAdminClient extends ElasticsearchClient { */ DeletePipelineRequestBuilder prepareDeletePipeline(); + /** + * Deletes a stored ingest pipeline + */ + DeletePipelineRequestBuilder prepareDeletePipeline(String id); + /** * Returns a stored ingest pipeline */ diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 2dd5cb138e8..006040b8e16 100644 --- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -1096,6 +1096,11 @@ public abstract class AbstractClient extends AbstractComponent implements Client return new DeletePipelineRequestBuilder(this, DeletePipelineAction.INSTANCE); } + @Override + public DeletePipelineRequestBuilder prepareDeletePipeline(String id) { + return new DeletePipelineRequestBuilder(this, DeletePipelineAction.INSTANCE, id); + } + @Override public void getPipeline(GetPipelineRequest request, ActionListener listener) { execute(GetPipelineAction.INSTANCE, request, listener); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index 3bbd56d9f49..82fb6476264 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -274,15 +274,16 @@ public class ClusterState implements ToXContent, Diffable { return routingNodes; } - public String prettyPrint() { + @Override + public String toString() { StringBuilder sb = new StringBuilder(); sb.append("cluster uuid: ").append(metaData.clusterUUID()).append("\n"); sb.append("version: ").append(version).append("\n"); sb.append("state uuid: ").append(stateUUID).append("\n"); sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); sb.append("meta data version: ").append(metaData.version()).append("\n"); + final String TAB = " "; for (IndexMetaData indexMetaData : metaData) { - final String TAB = " "; sb.append(TAB).append(indexMetaData.getIndex()); sb.append(": v[").append(indexMetaData.getVersion()).append("]\n"); for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) { @@ -291,24 +292,19 @@ public class ClusterState implements ToXContent, Diffable { sb.append("isa_ids 
").append(indexMetaData.inSyncAllocationIds(shard)).append("\n"); } } - sb.append(blocks().prettyPrint()); - sb.append(nodes().prettyPrint()); - sb.append(routingTable().prettyPrint()); - sb.append(getRoutingNodes().prettyPrint()); - return sb.toString(); - } - - @Override - public String toString() { - try { - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); - builder.startObject(); - toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - return builder.string(); - } catch (IOException e) { - return "{ \"error\" : \"" + e.getMessage() + "\"}"; + sb.append(blocks()); + sb.append(nodes()); + sb.append(routingTable()); + sb.append(getRoutingNodes()); + if (customs.isEmpty() == false) { + sb.append("customs:\n"); + for (ObjectObjectCursor cursor : customs) { + final String type = cursor.key; + final Custom custom = cursor.value; + sb.append(TAB).append(type).append(": ").append(custom); + } } + return sb.toString(); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index 6a9d0ae160f..94333c10dde 100644 --- a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -36,6 +36,7 @@ import org.elasticsearch.discovery.zen.NodesFaultDetection; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ScheduledFuture; @@ -75,10 +76,10 @@ public class NodeConnectionsService extends AbstractLifecycleComponent { this.reconnectInterval = NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings); } - public void connectToAddedNodes(ClusterChangedEvent event) { + public void connectToNodes(List addedNodes) { // TODO: do this in parallel (and wait) - for (final DiscoveryNode node : event.nodesDelta().addedNodes()) { + for (final DiscoveryNode node : addedNodes) { try (Releasable ignored = nodeLocks.acquire(node)) { Integer current = nodes.put(node, 0); assert current == null : "node " + node + " was added in event but already in internal nodes"; @@ -87,8 +88,8 @@ public class NodeConnectionsService extends AbstractLifecycleComponent { } } - public void disconnectFromRemovedNodes(ClusterChangedEvent event) { - for (final DiscoveryNode node : event.nodesDelta().removedNodes()) { + public void disconnectFromNodes(List removedNodes) { + for (final DiscoveryNode node : removedNodes) { try (Releasable ignored = nodeLocks.acquire(node)) { Integer current = nodes.remove(node); assert current != null : "node " + node + " was removed in event but not in internal nodes"; diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index ce6473ecb42..ee56d7a61a1 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -164,7 +164,7 @@ public class ShardStateAction extends AbstractComponent { @Override public void onNewClusterState(ClusterState state) { if (logger.isTraceEnabled()) { - logger.trace("new cluster state [{}] after waiting for master election to fail shard entry [{}]", state.prettyPrint(), shardEntry); + logger.trace("new cluster state [{}] after waiting for master election to 
fail shard entry [{}]", state, shardEntry); } sendShardAction(actionName, observer, shardEntry, listener); } diff --git a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index e6f04c8702c..12e6ee0f7ec 100644 --- a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -199,7 +199,8 @@ public class ClusterBlocks extends AbstractDiffable { return new ClusterBlockException(unmodifiableSet(blocks.collect(toSet()))); } - public String prettyPrint() { + @Override + public String toString() { if (global.isEmpty() && indices().isEmpty()) { return ""; } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index fd7e08fec31..3ee28f1662c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -97,10 +97,29 @@ public class MetaData implements Iterable, Diffable, Fr SNAPSHOT } + /** + * Indicates that this custom metadata will be returned as part of an API call but will not be persisted + */ public static EnumSet API_ONLY = EnumSet.of(XContentContext.API); + + /** + * Indicates that this custom metadata will be returned as part of an API call and will be persisted between + * node restarts, but will not be a part of a snapshot global state + */ public static EnumSet API_AND_GATEWAY = EnumSet.of(XContentContext.API, XContentContext.GATEWAY); + + /** + * Indicates that this custom metadata will be returned as part of an API call and stored as a part of + * a snapshot global state, but will not be persisted between node restarts + */ public static EnumSet API_AND_SNAPSHOT = EnumSet.of(XContentContext.API, XContentContext.SNAPSHOT); + /** + * Indicates that this custom metadata will be returned as part of an API call, stored as a part of + * a snapshot global state, and will be persisted between node restarts + */ + public static EnumSet ALL_CONTEXTS = EnumSet.allOf(XContentContext.class); + public interface Custom extends Diffable, ToXContent { String type(); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index e7c032a120b..f9034f6a29f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -352,7 +352,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { throw mpe; } - final QueryShardContext queryShardContext = indexService.newQueryShardContext(); + // the context is only used for validation so it's fine to pass fake values for the shard id and the current + // timestamp + final QueryShardContext queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L); for (Alias alias : request.aliases()) { if (Strings.hasLength(alias.filter())) { aliasValidator.validateAliasFilter(alias.name(), alias.filter(), queryShardContext); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 5b0c5a84061..b97b77de64f 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -149,7 +149,9 @@ public class MetaDataIndexAliasesService extends AbstractComponent { } indices.put(action.getIndex(), indexService); } - aliasValidator.validateAliasFilter(alias, filter, indexService.newQueryShardContext()); + // the context is only used for validation so it's fine to pass fake values for the shard id and the current + // timestamp + aliasValidator.validateAliasFilter(alias, filter, indexService.newQueryShardContext(0, null, () -> 0L)); } }; changed |= action.apply(newAliasValidator, metadata, index); diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index f3a8932f53c..895195d35b3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -26,7 +26,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -398,14 +397,6 @@ public class DiscoveryNodes extends AbstractDiffable implements @Override public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("{"); - sb.append(Strings.collectionToDelimitedString(this, ",")); - sb.append("}"); - return sb.toString(); - } - - public String prettyPrint() { StringBuilder sb = new StringBuilder(); sb.append("nodes: \n"); for (DiscoveryNode node : this) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index bd5113029c4..ddb7969f60a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -391,7 +391,8 @@ public class RoutingNodes implements Iterable { return shards; } - public String prettyPrint() { + @Override + public String toString() { StringBuilder sb = new StringBuilder("routing_nodes:\n"); for (RoutingNode routingNode : this) { sb.append(routingNode.prettyPrint()); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 9dd2cc72da8..1c3d629a72f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -109,7 +109,7 @@ public class RoutingService extends AbstractLifecycleComponent { rerouting.set(false); ClusterState state = clusterService.state(); if (logger.isTraceEnabled()) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state.prettyPrint()), e); + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e); } else { logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e); } @@ -118,7 +118,7 @@ public class RoutingService extends 
AbstractLifecycleComponent { } catch (Exception e) { rerouting.set(false); ClusterState state = clusterService.state(); - logger.warn((Supplier) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state.prettyPrint()), e); + logger.warn((Supplier) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 2d960ce0450..051fd12a12b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -613,7 +613,8 @@ public class RoutingTable implements Iterable, Diffable entry : indicesRouting) { sb.append(entry.value.prettyPrint()).append('\n'); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java index 52ace3744cd..fa30a102bf6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetaDataUpdater.java @@ -233,7 +233,7 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting Set oldInSyncAllocations = oldIndexMetaData.inSyncAllocationIds(shardNumber); Set idsToRemove = shardEntry.getValue().stream().map(e -> e.getAllocationId()).collect(Collectors.toSet()); assert idsToRemove.stream().allMatch(id -> oldRoutingTable.getByAllocationId(shardEntry.getKey(), id) == null) : - "removing stale ids: " + idsToRemove + ", some of which have still a routing entry: " + oldRoutingTable.prettyPrint(); + "removing stale ids: " + idsToRemove + ", some of which have still a routing entry: " + oldRoutingTable; Set remainingInSyncAllocations = Sets.difference(oldInSyncAllocations, idsToRemove); assert remainingInSyncAllocations.isEmpty() == false : "Set of in-sync ids cannot become empty for shard " + shardEntry.getKey() + " (before: " + oldInSyncAllocations + ", ids to remove: " + idsToRemove + ")"; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 93b9c90e490..3a6c1c45f01 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -126,6 +126,18 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards balancer.balance(); } + /** + * Returns a decision on rebalancing a single shard to form a more optimal cluster balance. This + * method is not used in itself for cluster rebalancing because all shards from all indices are + * taken into account when making rebalancing decisions. This method is only intended to be used + * from the cluster allocation explain API to explain possible rebalancing decisions for a single + * shard. 
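The lessThan helper documented in the hunk above rounds deltas near the threshold so that floating point noise cannot flip a rebalance decision at the boundary. A standalone rendering of the idea; SLACK is an illustrative constant, not necessarily the allocator's actual value:

    // Sketch of a float comparison that treats deltas within a small slack of the
    // threshold as equal to it, preventing near-boundary noise from changing the outcome.
    final class ThresholdSketch {
        private static final float SLACK = 0.001f; // illustrative, not the real constant

        static boolean lessThan(float delta, float threshold) {
            return delta <= threshold + SLACK;
        }

        public static void main(String[] args) {
            System.out.println(lessThan(1.0005f, 1.0f)); // true: rounded to the threshold
            System.out.println(lessThan(1.5f, 1.0f));    // false: clearly above it
        }
    }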
+ */ + public RebalanceDecision decideRebalance(final ShardRouting shard, final RoutingAllocation allocation) { + assert allocation.debugDecision() : "debugDecision should be set in explain mode"; + return new Balancer(logger, allocation, weightFunction, threshold).decideRebalance(shard); + } + /** * Returns the currently configured delta threshold */ @@ -267,11 +279,18 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards return new NodeSorter(nodesArray(), weight, this); } + /** + * The absolute value difference between two weights. + */ private static float absDelta(float lower, float higher) { assert higher >= lower : higher + " lt " + lower +" but was expected to be gte"; return Math.abs(higher - lower); } + /** + * Returns {@code true} iff the weight delta between two nodes is under a defined threshold. + * See {@link #THRESHOLD_SETTING} for defining the threshold. + */ private static boolean lessThan(float delta, float threshold) { /* deltas close to the threshold are "rounded" to the threshold manually to prevent floating point problems if the delta is very close to the @@ -309,6 +328,110 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards balanceByWeights(); } + /** + * Makes a decision about moving a single shard to a different node to form a more + * optimally balanced cluster. This method is invoked from the cluster allocation + * explain API only. + */ + private RebalanceDecision decideRebalance(final ShardRouting shard) { + if (shard.started() == false) { + // cannot rebalance a shard that isn't started + return RebalanceDecision.NOT_TAKEN; + } + + Decision canRebalance = allocation.deciders().canRebalance(shard, allocation); + + if (allocation.hasPendingAsyncFetch()) { + return new RebalanceDecision( + canRebalance, + Type.NO, + "cannot rebalance due to in-flight shard store fetches, otherwise allocation may prematurely rebalance a shard to " + + "a node that is soon to receive another shard assignment upon completion of the shard store fetch, " + + "rendering the cluster imbalanced again" + ); + } + + sorter.reset(shard.getIndexName()); + ModelNode[] modelNodes = sorter.modelNodes; + final String currentNodeId = shard.currentNodeId(); + // find currently assigned node + ModelNode currentNode = null; + for (ModelNode node : modelNodes) { + if (node.getNodeId().equals(currentNodeId)) { + currentNode = node; + break; + } + } + assert currentNode != null : "currently assigned node could not be found"; + + // balance the shard, if a better node can be found + final float currentWeight = sorter.weight(currentNode); + final AllocationDeciders deciders = allocation.deciders(); + final String idxName = shard.getIndexName(); + Map nodeDecisions = new HashMap<>(modelNodes.length - 1); + Type rebalanceDecisionType = Type.NO; + String assignedNodeId = null; + for (ModelNode node : modelNodes) { + if (node == currentNode) { + continue; // skip over the node the shard is currently allocated to + } + final Decision canAllocate = deciders.canAllocate(shard, node.getRoutingNode(), allocation); + // the current weight of the node in the cluster, as computed by the weight function; + // this is a comparison of the number of shards on this node to the number of shards + // that should be on each node on average (both taking the cluster as a whole into account + // as well as shards per index) + final float nodeWeight = sorter.weight(node); + // if the node we are examining has a worse (higher) weight than the node the shard is + // assigned 
to, then there is no way moving the shard to the node with the worse weight + // can make the balance of the cluster better, so we check for that here + final boolean betterWeightThanCurrent = nodeWeight <= currentWeight; + boolean rebalanceConditionsMet = false; + boolean deltaAboveThreshold = false; + float weightWithShardAdded = Float.POSITIVE_INFINITY; + if (betterWeightThanCurrent) { + // get the delta between the weights of the node we are checking and the node that holds the shard + final float currentDelta = absDelta(nodeWeight, currentWeight); + // checks if the weight delta is above a certain threshold; if it is not, + // then even though the node we are examining has a better weight and may make the cluster balance + // more even, it doesn't make sense to execute the heavyweight operation of relocating a shard unless + // the gains make it worth it, as defined by the threshold + deltaAboveThreshold = lessThan(currentDelta, threshold) == false; + // simulate the weight of the node if we were to relocate the shard to it + weightWithShardAdded = weight.weightShardAdded(this, node, idxName); + // calculate the delta of the weights of the two nodes if we were to add the shard to the + // node in question and move it away from the node that currently holds it. + final float proposedDelta = weightWithShardAdded - weight.weightShardRemoved(this, currentNode, idxName); + rebalanceConditionsMet = deltaAboveThreshold && proposedDelta < currentDelta; + // if the simulated weight delta with the shard moved away is better than the weight delta + // with the shard remaining on the current node, and we are allowed to allocate to the + // node in question, then allow the rebalance + if (rebalanceConditionsMet && canAllocate.type().higherThan(rebalanceDecisionType)) { + // rebalance to this node; the choice will only be overwritten if the decision here is + // THROTTLE and another node later returns a YES decision + rebalanceDecisionType = canAllocate.type(); + assignedNodeId = node.getNodeId(); + } + } + nodeDecisions.put(node.getNodeId(), new NodeRebalanceDecision( + rebalanceConditionsMet ? 
canAllocate.type() : Type.NO, + canAllocate, + betterWeightThanCurrent, + deltaAboveThreshold, + nodeWeight, + weightWithShardAdded) + ); + } + + + if (canRebalance.type() != Type.YES) { + return new RebalanceDecision(canRebalance, canRebalance.type(), "rebalancing is not allowed", null, + nodeDecisions, currentWeight); + } else { + return RebalanceDecision.decision(canRebalance, rebalanceDecisionType, assignedNodeId, + nodeDecisions, currentWeight, threshold); + } + } + public Map weighShard(ShardRouting shard) { final ModelNode[] modelNodes = sorter.modelNodes; final float[] weights = sorter.weights; @@ -539,7 +662,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards public MoveDecision makeMoveDecision(final ShardRouting shardRouting) { if (shardRouting.started() == false) { // we can only move started shards - return MoveDecision.DECISION_NOT_TAKEN; + return MoveDecision.NOT_TAKEN; } final boolean explain = allocation.debugDecision(); @@ -1110,15 +1233,11 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards private final String finalExplanation; @Nullable private final String assignedNodeId; - @Nullable - private final Map nodeDecisions; - protected RelocationDecision(Type finalDecision, String finalExplanation, String assignedNodeId, - Map nodeDecisions) { + protected RelocationDecision(Type finalDecision, String finalExplanation, String assignedNodeId) { this.finalDecision = finalDecision; this.finalExplanation = finalExplanation; this.assignedNodeId = assignedNodeId; - this.nodeDecisions = nodeDecisions; } /** @@ -1153,15 +1272,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards public String getAssignedNodeId() { return assignedNodeId; } - - /** - * Gets the individual node-level decisions that went into making the final decision as represented by - * {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link WeightedDecision}. - */ - @Nullable - public Map getNodeDecisions() { - return nodeDecisions; - } } /** @@ -1169,18 +1279,21 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards */ public static final class MoveDecision extends RelocationDecision { /** a constant representing no decision taken */ - public static final MoveDecision DECISION_NOT_TAKEN = new MoveDecision(null, null, null, null, null); + public static final MoveDecision NOT_TAKEN = new MoveDecision(null, null, null, null, null); /** cached decisions so we don't have to recreate objects for common decisions when not in explain mode. */ private static final MoveDecision CACHED_STAY_DECISION = new MoveDecision(Decision.YES, Type.NO, null, null, null); private static final MoveDecision CACHED_CANNOT_MOVE_DECISION = new MoveDecision(Decision.NO, Type.NO, null, null, null); @Nullable private final Decision canRemainDecision; + @Nullable + private final Map nodeDecisions; private MoveDecision(Decision canRemainDecision, Type finalDecision, String finalExplanation, String assignedNodeId, Map nodeDecisions) { - super(finalDecision, finalExplanation, assignedNodeId, nodeDecisions); + super(finalDecision, finalExplanation, assignedNodeId); this.canRemainDecision = canRemainDecision; + this.nodeDecisions = nodeDecisions != null ? 
Collections.unmodifiableMap(nodeDecisions) : null; } /** @@ -1250,6 +1363,147 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards public boolean cannotRemain() { return isDecisionTaken() && canRemainDecision.type() == Type.NO; } + + /** + * Gets the individual node-level decisions that went into making the final decision as represented by + * {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link WeightedDecision}. + */ + @Nullable + public Map getNodeDecisions() { + return nodeDecisions; + } + } + + /** + * Represents a decision to move a started shard to form a more optimally balanced cluster. + */ + public static final class RebalanceDecision extends RelocationDecision { + /** a constant representing no decision taken */ + public static final RebalanceDecision NOT_TAKEN = new RebalanceDecision(null, null, null, null, null, Float.POSITIVE_INFINITY); + + @Nullable + private final Decision canRebalanceDecision; + @Nullable + private final Map nodeDecisions; + private float currentWeight; + + protected RebalanceDecision(Decision canRebalanceDecision, Type finalDecision, String finalExplanation) { + this(canRebalanceDecision, finalDecision, finalExplanation, null, null, Float.POSITIVE_INFINITY); + } + + protected RebalanceDecision(Decision canRebalanceDecision, Type finalDecision, String finalExplanation, + String assignedNodeId, Map nodeDecisions, float currentWeight) { + super(finalDecision, finalExplanation, assignedNodeId); + this.canRebalanceDecision = canRebalanceDecision; + this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null; + this.currentWeight = currentWeight; + } + + /** + * Creates a new {@link RebalanceDecision}, computing the explanation based on the decision parameters. + */ + public static RebalanceDecision decision(Decision canRebalanceDecision, Type finalDecision, String assignedNodeId, + Map nodeDecisions, float currentWeight, float threshold) { + final String explanation = produceFinalExplanation(finalDecision, assignedNodeId, threshold); + return new RebalanceDecision(canRebalanceDecision, finalDecision, explanation, assignedNodeId, nodeDecisions, currentWeight); + } + + /** + * Returns the decision for being allowed to rebalance the shard. + */ + @Nullable + public Decision getCanRebalanceDecision() { + return canRebalanceDecision; + } + + /** + * Gets the individual node-level decisions that went into making the final decision as represented by + * {@link #getFinalDecisionType()}. The map that is returned has the node id as the key and a {@link NodeRebalanceDecision}. + */ + @Nullable + public Map getNodeDecisions() { + return nodeDecisions; + } + + private static String produceFinalExplanation(final Type finalDecisionType, final String assignedNodeId, final float threshold) { + final String finalExplanation; + if (assignedNodeId != null) { + if (finalDecisionType == Type.THROTTLE) { + finalExplanation = "throttle moving shard to node [" + assignedNodeId + "], as it is " + + "currently busy with other shard relocations"; + } else { + finalExplanation = "moving shard to node [" + assignedNodeId + "] to form a more balanced cluster"; + } + } else { + finalExplanation = "cannot rebalance shard, no other node exists that would form a more balanced " + + "cluster within the defined threshold [" + threshold + "]"; + } + return finalExplanation; + } + } + + /** + * A node-level explanation for the decision to rebalance a shard. 
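+ * <p> + * For each candidate node this captures the allocation deciders' verdict ({@link #getCanAllocateDecision()}) + * together with the weight calculations ({@link #getCurrentWeight()} and {@link #getWeightWithShardAdded()}) + * that determined whether moving the shard to that node would improve the overall cluster balance. 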
+ */ + public static final class NodeRebalanceDecision { + private final Type nodeDecisionType; + private final Decision canAllocate; + private final boolean betterWeightThanCurrent; + private final boolean deltaAboveThreshold; + private final float currentWeight; + private final float weightWithShardAdded; + + NodeRebalanceDecision(Type nodeDecisionType, Decision canAllocate, boolean betterWeightThanCurrent, + boolean deltaAboveThreshold, float currentWeight, float weightWithShardAdded) { + this.nodeDecisionType = Objects.requireNonNull(nodeDecisionType); + this.canAllocate = Objects.requireNonNull(canAllocate); + this.betterWeightThanCurrent = betterWeightThanCurrent; + this.deltaAboveThreshold = deltaAboveThreshold; + this.currentWeight = currentWeight; + this.weightWithShardAdded = weightWithShardAdded; + } + + /** + * Returns the decision to rebalance to the node. + */ + public Type getNodeDecisionType() { + return nodeDecisionType; + } + + /** + * Returns whether the shard is allowed to be allocated to the node. + */ + public Decision getCanAllocateDecision() { + return canAllocate; + } + + /** + * Returns whether the weight of the node is better than the weight of the node where the shard currently resides. + */ + public boolean isBetterWeightThanCurrent() { + return betterWeightThanCurrent; + } + + /** + * Returns whether the weight delta resulting from assigning the shard to this node was above the threshold needed to warrant a rebalance. + */ + public boolean isDeltaAboveThreshold() { + return deltaAboveThreshold; + } + + /** + * Returns the current weight of the node if the shard is not added to the node. + */ + public float getCurrentWeight() { + return currentWeight; + } + + /** + * Returns the weight of the node if the shard is added to the node. + */ + public float getWeightWithShardAdded() { + return weightWithShardAdded; + } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java index e6a3eba7437..11db0980f47 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java @@ -222,7 +222,7 @@ public abstract class Decision implements ToXContent { } /** - * Returns the explanation string, fully formatted. Only formats the string once + * Returns the explanation string, fully formatted. Only formats the string once. 
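+ * For example, explanations built from {@code {}} placeholders (such as the "no allocations are allowed due to {}" + * messages produced by {@code EnableAllocationDecider}) are substituted once and the formatted result is then + * reused on subsequent calls. 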
*/ @Nullable public String getExplanation() { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 64bf5942142..1a38e3742fc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -103,28 +103,33 @@ public class EnableAllocationDecider extends AllocationDecider { final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); final Allocation enable; + final boolean usedIndexSetting; if (INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.exists(indexMetaData.getSettings())) { enable = INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.get(indexMetaData.getSettings()); + usedIndexSetting = true; } else { enable = this.enableAllocation; + usedIndexSetting = false; } switch (enable) { case ALL: return allocation.decision(Decision.YES, NAME, "all allocations are allowed"); case NONE: - return allocation.decision(Decision.NO, NAME, "no allocations are allowed"); + return allocation.decision(Decision.NO, NAME, "no allocations are allowed due to {}", setting(enable, usedIndexSetting)); case NEW_PRIMARIES: if (shardRouting.primary() && shardRouting.active() == false && shardRouting.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE) { return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed"); } else { - return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden"); + return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden due to {}", + setting(enable, usedIndexSetting)); } case PRIMARIES: if (shardRouting.primary()) { return allocation.decision(Decision.YES, NAME, "primary allocations are allowed"); } else { - return allocation.decision(Decision.NO, NAME, "replica allocations are forbidden"); + return allocation.decision(Decision.NO, NAME, "replica allocations are forbidden due to {}", + setting(enable, usedIndexSetting)); } default: throw new IllegalStateException("Unknown allocation option"); @@ -139,33 +144,60 @@ public class EnableAllocationDecider extends AllocationDecider { Settings indexSettings = allocation.metaData().getIndexSafe(shardRouting.index()).getSettings(); final Rebalance enable; + final boolean usedIndexSetting; if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) { enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings); + usedIndexSetting = true; } else { enable = this.enableRebalance; + usedIndexSetting = false; } switch (enable) { case ALL: return allocation.decision(Decision.YES, NAME, "all rebalancing is allowed"); case NONE: - return allocation.decision(Decision.NO, NAME, "no rebalancing is allowed"); + return allocation.decision(Decision.NO, NAME, "no rebalancing is allowed due to {}", setting(enable, usedIndexSetting)); case PRIMARIES: if (shardRouting.primary()) { return allocation.decision(Decision.YES, NAME, "primary rebalancing is allowed"); } else { - return allocation.decision(Decision.NO, NAME, "replica rebalancing is forbidden"); + return allocation.decision(Decision.NO, NAME, "replica rebalancing is forbidden due to {}", + setting(enable, usedIndexSetting)); } case REPLICAS: if (shardRouting.primary() == false) { return allocation.decision(Decision.YES, NAME, "replica rebalancing is allowed"); } 
else { - return allocation.decision(Decision.NO, NAME, "primary rebalancing is forbidden"); + return allocation.decision(Decision.NO, NAME, "primary rebalancing is forbidden due to {}", + setting(enable, usedIndexSetting)); } default: throw new IllegalStateException("Unknown rebalance option"); } } + private static String setting(Allocation allocation, boolean usedIndexSetting) { + StringBuilder buf = new StringBuilder("["); + if (usedIndexSetting) { + buf.append(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey()); + } else { + buf.append(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey()); + } + buf.append("=").append(allocation.toString().toLowerCase(Locale.ROOT)).append("]"); + return buf.toString(); + } + + private static String setting(Rebalance rebalance, boolean usedIndexSetting) { + StringBuilder buf = new StringBuilder("["); + if (usedIndexSetting) { + buf.append(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey()); + } else { + buf.append(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey()); + } + buf.append("=").append(rebalance.toString().toLowerCase(Locale.ROOT)).append("]"); + return buf.toString(); + } + /** * Allocation values or rather their string representation to be used with * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index 38d42a0b0f9..4f637e05648 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -562,9 +562,9 @@ public class ClusterService extends AbstractLifecycleComponent { executionTime, previousClusterState.version(), tasksSummary, - previousClusterState.nodes().prettyPrint(), - previousClusterState.routingTable().prettyPrint(), - previousClusterState.getRoutingNodes().prettyPrint()), + previousClusterState.nodes(), + previousClusterState.routingTable(), + previousClusterState.getRoutingNodes()), e); } warnAboutSlowTaskIfNeeded(executionTime, tasksSummary); @@ -656,7 +656,7 @@ public class ClusterService extends AbstractLifecycleComponent { newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED); if (logger.isTraceEnabled()) { - logger.trace("cluster state updated, source [{}]\n{}", tasksSummary, newClusterState.prettyPrint()); + logger.trace("cluster state updated, source [{}]\n{}", tasksSummary, newClusterState); } else if (logger.isDebugEnabled()) { logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), tasksSummary); } @@ -671,7 +671,7 @@ public class ClusterService extends AbstractLifecycleComponent { } } - nodeConnectionsService.connectToAddedNodes(clusterChangedEvent); + nodeConnectionsService.connectToNodes(clusterChangedEvent.nodesDelta().addedNodes()); // if we are the master, publish the new state to all nodes // we publish here before we send a notification to all the listeners, since if it fails @@ -686,6 +686,8 @@ public class ClusterService extends AbstractLifecycleComponent { (Supplier) () -> new ParameterizedMessage( "failing [{}]: failed to commit cluster state version [{}]", tasksSummary, version), t); + // ensure that list of connected nodes in NodeConnectionsService is in-sync with the nodes of the current cluster state + nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().addedNodes()); proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t)); 
return; } @@ -711,7 +713,7 @@ public class ClusterService extends AbstractLifecycleComponent { } } - nodeConnectionsService.disconnectFromRemovedNodes(clusterChangedEvent); + nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().removedNodes()); newClusterState.status(ClusterState.ClusterStateStatus.APPLIED); @@ -757,7 +759,7 @@ public class ClusterService extends AbstractLifecycleComponent { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); final long version = newClusterState.version(); final String stateUUID = newClusterState.stateUUID(); - final String prettyPrint = newClusterState.prettyPrint(); + final String fullState = newClusterState.toString(); logger.warn( (Supplier) () -> new ParameterizedMessage( "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", @@ -765,7 +767,7 @@ public class ClusterService extends AbstractLifecycleComponent { version, stateUUID, tasksSummary, - prettyPrint), + fullState), e); // TODO: do we want to call updateTask.onFailure here? } @@ -824,9 +826,7 @@ public class ClusterService extends AbstractLifecycleComponent { (Supplier) () -> new ParameterizedMessage( "exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" + "{}\nnew cluster state:\n{}", - source, - oldState.prettyPrint(), - newState.prettyPrint()), + source, oldState, newState), e); } } diff --git a/core/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java b/core/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java index e2868b23e89..2ed43ccaa24 100644 --- a/core/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java +++ b/core/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.component; import org.elasticsearch.common.settings.Settings; +import java.io.IOException; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; @@ -101,11 +102,17 @@ public abstract class AbstractLifecycleComponent extends AbstractComponent imple listener.beforeClose(); } lifecycle.moveToClosed(); - doClose(); + try { + doClose(); + } catch (IOException e) { + // TODO: we need to separate out closing (ie shutting down) services, vs releasing runtime transient + // structures. 
Shutting down services should use IOUtils.close + logger.warn("failed to close " + getClass().getName(), e); + } for (LifecycleListener listener : listeners) { listener.afterClose(); } } - protected abstract void doClose(); + protected abstract void doClose() throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java index 3de5c757ae1..21de0c421b7 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java @@ -69,7 +69,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { @Override public void writeByte(byte b) throws IOException { - ensureCapacity(count+1); + ensureCapacity(count + 1L); bytes.set(count, b); count++; } @@ -87,7 +87,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { } // get enough pages for new size - ensureCapacity(count+length); + ensureCapacity(((long) count) + length); // bulk copy bytes.set(count, b, offset, length); @@ -113,22 +113,17 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { } @Override - public void seek(long position) throws IOException { - if (position > Integer.MAX_VALUE) { - throw new IllegalArgumentException("position " + position + " > Integer.MAX_VALUE"); - } - - count = (int)position; - ensureCapacity(count); + public void seek(long position) { + ensureCapacity(position); + count = (int) position; } public void skip(int length) { - count += length; - ensureCapacity(count); + seek(((long) count) + length); } @Override - public void close() throws IOException { + public void close() { // empty for now. } @@ -156,7 +151,10 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { return bytes.ramBytesUsed(); } - private void ensureCapacity(int offset) { + private void ensureCapacity(long offset) { + if (offset > Integer.MAX_VALUE) { + throw new IllegalArgumentException(getClass().getSimpleName() + " cannot hold more than 2GB of data"); + } bytes = bigArrays.grow(bytes, offset); } diff --git a/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java b/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java deleted file mode 100644 index a5dac12fab7..00000000000 --- a/core/src/main/java/org/elasticsearch/common/util/ExtensionPoint.java +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.util; - -import org.elasticsearch.common.inject.Binder; -import org.elasticsearch.common.inject.multibindings.MapBinder; -import org.elasticsearch.common.inject.multibindings.Multibinder; -import org.elasticsearch.common.settings.Settings; - -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -/** - * This class defines an official elasticsearch extension point. It registers - * all extensions by a single name and ensures that extensions are not registered - * more than once. - */ -public abstract class ExtensionPoint { - protected final String name; - protected final Class[] singletons; - - /** - * Creates a new extension point - * - * @param name the human readable underscore case name of the extension point. This is used in error messages etc. - * @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)} - */ - public ExtensionPoint(String name, Class... singletons) { - this.name = name; - this.singletons = singletons; - } - - /** - * Binds the extension as well as the singletons to the given guice binder. - * - * @param binder the binder to use - */ - public final void bind(Binder binder) { - for (Class c : singletons) { - binder.bind(c).asEagerSingleton(); - } - bindExtensions(binder); - } - - /** - * Subclasses can bind their type, map or set extensions here. - */ - protected abstract void bindExtensions(Binder binder); - - /** - * A map based extension point which allows to register keyed implementations ie. parsers or some kind of strategies. - */ - public static class ClassMap extends ExtensionPoint { - protected final Class extensionClass; - protected final Map> extensions = new HashMap<>(); - private final Set reservedKeys; - - /** - * Creates a new {@link ClassMap} - * - * @param name the human readable underscore case name of the extension point. This is used in error messages etc. - * @param extensionClass the base class that should be extended - * @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)} - * @param reservedKeys a set of reserved keys by internal implementations - */ - public ClassMap(String name, Class extensionClass, Set reservedKeys, Class... singletons) { - super(name, singletons); - this.extensionClass = extensionClass; - this.reservedKeys = reservedKeys; - } - - /** - * Returns the extension for the given key or null - */ - public Class getExtension(String type) { - return extensions.get(type); - } - - /** - * Registers an extension class for a given key. 
This method will thr - * - * @param key the extensions key - * @param extension the extension - * @throws IllegalArgumentException iff the key is already registered or if the key is a reserved key for an internal implementation - */ - public final void registerExtension(String key, Class extension) { - if (extensions.containsKey(key) || reservedKeys.contains(key)) { - throw new IllegalArgumentException("Can't register the same [" + this.name + "] more than once for [" + key + "]"); - } - extensions.put(key, extension); - } - - @Override - protected final void bindExtensions(Binder binder) { - MapBinder parserMapBinder = MapBinder.newMapBinder(binder, String.class, extensionClass); - for (Map.Entry> clazz : extensions.entrySet()) { - parserMapBinder.addBinding(clazz.getKey()).to(clazz.getValue()); - } - } - } - - /** - * A Type extension point which basically allows to registered keyed extensions like {@link ClassMap} - * but doesn't instantiate and bind all the registered key value pairs but instead replace a singleton based on a given setting via {@link #bindType(Binder, Settings, String, String)} - * Note: {@link #bind(Binder)} is not supported by this class - */ - public static final class SelectedType extends ClassMap { - - public SelectedType(String name, Class extensionClass) { - super(name, extensionClass, Collections.emptySet()); - } - - /** - * Binds the extension class to the class that is registered for the give configured for the settings key in - * the settings object. - * - * @param binder the binder to use - * @param settings the settings to look up the key to find the implementation to bind - * @param settingsKey the key to use with the settings - * @param defaultValue the default value if the settings do not contain the key, or null if there is no default - * @return the actual bound type key - */ - public String bindType(Binder binder, Settings settings, String settingsKey, String defaultValue) { - final String type = settings.get(settingsKey, defaultValue); - if (type == null) { - throw new IllegalArgumentException("Missing setting [" + settingsKey + "]"); - } - final Class instance = getExtension(type); - if (instance == null) { - throw new IllegalArgumentException("Unknown [" + this.name + "] type [" + type + "] possible values: " - + extensions.keySet()); - } - if (extensionClass == instance) { - binder.bind(extensionClass).asEagerSingleton(); - } else { - binder.bind(extensionClass).to(instance).asEagerSingleton(); - } - return type; - } - - } - - /** - * A set based extension point which allows to register extended classes that might be used to chain additional functionality etc. - */ - public static final class ClassSet extends ExtensionPoint { - protected final Class extensionClass; - private final Set> extensions = new HashSet<>(); - - /** - * Creates a new {@link ClassSet} - * - * @param name the human readable underscore case name of the extension point. This is used in error messages etc. - * @param extensionClass the base class that should be extended - * @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)} - */ - public ClassSet(String name, Class extensionClass, Class... 
singletons) { - super(name, singletons); - this.extensionClass = extensionClass; - } - - /** - * Registers a new extension - * - * @param extension the extension to register - * @throws IllegalArgumentException iff the class is already registered - */ - public void registerExtension(Class extension) { - if (extensions.contains(extension)) { - throw new IllegalArgumentException("Can't register the same [" + this.name + "] more than once for [" + extension.getName() + "]"); - } - extensions.add(extension); - } - - @Override - protected void bindExtensions(Binder binder) { - Multibinder allocationMultibinder = Multibinder.newSetBinder(binder, extensionClass); - for (Class clazz : extensions) { - binder.bind(clazz).asEagerSingleton(); - allocationMultibinder.addBinding().to(clazz); - } - } - - public boolean isEmpty() { - return extensions.isEmpty(); - } - } - - /** - * A an instance of a map, mapping one instance value to another. Both key and value are instances, not classes - * like with other extension points. - */ - public static final class InstanceMap extends ExtensionPoint { - private final Map map = new HashMap<>(); - private final Class keyType; - private final Class valueType; - - /** - * Creates a new {@link ClassSet} - * - * @param name the human readable underscore case name of the extension point. This is used in error messages. - * @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)} - */ - public InstanceMap(String name, Class keyType, Class valueType, Class... singletons) { - super(name, singletons); - this.keyType = keyType; - this.valueType = valueType; - } - - /** - * Registers a mapping from {@code key} to {@code value} - * - * @throws IllegalArgumentException iff the key is already registered - */ - public void registerExtension(K key, V value) { - V old = map.put(key, value); - if (old != null) { - throw new IllegalArgumentException("Cannot register [" + this.name + "] with key [" + key + "] to [" + value + "], already registered to [" + old + "]"); - } - } - - @Override - protected void bindExtensions(Binder binder) { - MapBinder mapBinder = MapBinder.newMapBinder(binder, keyType, valueType); - for (Map.Entry entry : map.entrySet()) { - mapBinder.addBinding(entry.getKey()).toInstance(entry.getValue()); - } - } - } -} diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java index 8d1b8efef51..478f3a8a08f 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java @@ -94,4 +94,9 @@ public interface XContentGenerator extends Closeable, Flushable { void copyCurrentStructure(XContentParser parser) throws IOException; + /** + * Returns {@code true} if this XContentGenerator has been closed. A closed generator cannot do any more output. 
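+ * <p> + * A usage sketch (illustrative only, not part of this change): callers handed a generator that may already be + * closed can guard their writes, e.g. {@code if (generator.isClosed() == false) { generator.flush(); } }. 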
+ */ + boolean isClosed(); + } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java index 74e1cb5e58f..763fac4c6a3 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java @@ -419,4 +419,8 @@ public class JsonXContentGenerator implements XContentGenerator { generator.close(); } + @Override + public boolean isClosed() { + return generator.isClosed(); + } } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java b/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java index c745c7d6c0c..a94bf63e270 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java @@ -185,8 +185,8 @@ public class XContentMapValues { // we want all sub properties to match as soon as an object matches return (map) -> filter(map, - include, include.getInitialState(), - exclude, exclude.getInitialState(), + include, 0, + exclude, 0, matchAllAutomaton); } @@ -237,7 +237,7 @@ public class XContentMapValues { // the object matched, so consider that the include matches every inner property // we only care about excludes now subIncludeAutomaton = matchAllAutomaton; - subIncludeState = includeAutomaton.getInitialState(); + subIncludeState = 0; } } diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index ef3018c19b7..61316a852bb 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -19,20 +19,6 @@ package org.elasticsearch.discovery; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.ExtensionPoint; -import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.plugins.DiscoveryPlugin; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.discovery.zen.ZenPing; -import org.elasticsearch.discovery.zen.ZenPingService; -import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.discovery.zen.UnicastZenPing; - import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -41,6 +27,16 @@ import java.util.Objects; import java.util.function.Function; import java.util.function.Supplier; +import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.transport.TransportService; + /** * A module for loading classes for node discovery. 
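 * The concrete implementation is selected via {@link #DISCOVERY_TYPE_SETTING}; unless the configured type is * "none", a single {@link UnicastHostsProvider} is resolved eagerly in the constructor and exposed via * {@code getHostsProvider()}. 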
*/ @@ -52,8 +48,7 @@ public class DiscoveryModule extends AbstractModule { new Setting<>("discovery.zen.hosts_provider", DISCOVERY_TYPE_SETTING, Function.identity(), Property.NodeScope); private final Settings settings; - private final Map> unicastHostProviders; - private final ExtensionPoint.ClassSet zenPings = new ExtensionPoint.ClassSet<>("zen_ping", ZenPing.class); + private final UnicastHostsProvider hostsProvider; private final Map> discoveryTypes = new HashMap<>(); public DiscoveryModule(Settings settings, TransportService transportService, NetworkService networkService, @@ -62,16 +57,30 @@ public class DiscoveryModule extends AbstractModule { addDiscoveryType("none", NoneDiscovery.class); addDiscoveryType("zen", ZenDiscovery.class); - Map> hostProviders = new HashMap<>(); - hostProviders.put("zen", () -> Collections::emptyList); - for (DiscoveryPlugin plugin : plugins) { - plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> { - if (hostProviders.put(entry.getKey(), entry.getValue()) != null) { - throw new IllegalArgumentException("Cannot specify zen hosts provider [" + entry.getKey() + "] twice"); - } - }); + String discoveryType = DISCOVERY_TYPE_SETTING.get(settings); + if (discoveryType.equals("none") == false) { + Map> hostProviders = new HashMap<>(); + hostProviders.put("zen", () -> Collections::emptyList); + for (DiscoveryPlugin plugin : plugins) { + plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> { + if (hostProviders.put(entry.getKey(), entry.getValue()) != null) { + throw new IllegalArgumentException("Cannot specify zen hosts provider [" + entry.getKey() + "] twice"); + } + }); + } + String hostsProviderName = DISCOVERY_HOSTS_PROVIDER_SETTING.get(settings); + Supplier hostsProviderSupplier = hostProviders.get(hostsProviderName); + if (hostsProviderSupplier == null) { + throw new IllegalArgumentException("Unknown zen hosts provider [" + hostsProviderName + "]"); + } + hostsProvider = Objects.requireNonNull(hostsProviderSupplier.get()); + } else { + hostsProvider = null; } - unicastHostProviders = Collections.unmodifiableMap(hostProviders); + } + + public UnicastHostsProvider getHostsProvider() { + return hostsProvider; } /** @@ -84,10 +93,6 @@ public class DiscoveryModule extends AbstractModule { discoveryTypes.put(type, clazz); } - public void addZenPing(Class clazz) { - zenPings.registerExtension(clazz); - } - @Override protected void configure() { String discoveryType = DISCOVERY_TYPE_SETTING.get(settings); @@ -97,18 +102,7 @@ public class DiscoveryModule extends AbstractModule { } if (discoveryType.equals("none") == false) { - bind(ZenPingService.class).asEagerSingleton(); - String hostsProviderName = DISCOVERY_HOSTS_PROVIDER_SETTING.get(settings); - Supplier hostsProviderSupplier = unicastHostProviders.get(hostsProviderName); - if (hostsProviderSupplier == null) { - throw new IllegalArgumentException("Unknown zen hosts provider [" + hostsProviderName + "]"); - } - UnicastHostsProvider hostsProvider = Objects.requireNonNull(hostsProviderSupplier.get()); bind(UnicastHostsProvider.class).toInstance(hostsProvider); - if (zenPings.isEmpty()) { - zenPings.registerExtension(UnicastZenPing.class); - } - zenPings.bind(binder()); } bind(Discovery.class).to(discoveryClass).asEagerSingleton(); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java index f1f8b28ad09..715e8be03ef 100644 --- 
a/core/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.zen; +import java.io.Closeable; + import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; @@ -36,7 +38,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; * A base class for {@link MasterFaultDetection} & {@link NodesFaultDetection}, * making sure both use the same setting. */ -public abstract class FaultDetection extends AbstractComponent { +public abstract class FaultDetection extends AbstractComponent implements Closeable { public static final Setting CONNECT_ON_NETWORK_DISCONNECT_SETTING = Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, Property.NodeScope); @@ -80,6 +82,7 @@ public abstract class FaultDetection extends AbstractComponent { } } + @Override public void close() { transportService.removeConnectionListener(connectionListener); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index 3c8deee7c5f..6d77e2f48fe 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -464,7 +464,7 @@ public class NodeJoinController extends AbstractComponent { } private ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState currentState, List joiningNodes) { - assert currentState.nodes().getMasterNodeId() == null : currentState.prettyPrint(); + assert currentState.nodes().getMasterNodeId() == null : currentState; DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes()); nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()) diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index bd8e2353f72..f6870cc05b6 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -19,6 +19,26 @@ package org.elasticsearch.discovery.zen; +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; + import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; @@ -30,8 +50,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.UUIDs; -import 
org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; @@ -56,34 +75,13 @@ import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; - import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; import static org.elasticsearch.discovery.zen.ZenPing.PingResponse.readPingResponse; -public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPing { +public class UnicastZenPing extends AbstractComponent implements ZenPing { public static final String ACTION_NAME = "internal:discovery/zen/unicast"; public static final Setting> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = @@ -125,15 +123,13 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin private volatile boolean closed = false; - @Inject public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, - UnicastHostsProvider unicastHostsProviders) { + UnicastHostsProvider unicastHostsProvider) { super(settings); this.threadPool = threadPool; this.transportService = transportService; this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); - - this.hostsProvider = unicastHostsProviders; + this.hostsProvider = unicastHostsProvider; this.concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings); List hosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings); @@ -190,26 +186,14 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin } @Override - protected void doStart() { - } - - @Override - protected void doStop() { - } - - @Override - protected void doClose() { + public void close() throws IOException { ThreadPool.terminate(unicastConnectExecutor, 0, TimeUnit.SECONDS); - try { - IOUtils.close(receivedResponses.values()); - } catch (IOException e) { - throw new ElasticsearchException("Error wile closing send ping handlers", e); - } + IOUtils.close(receivedResponses.values()); closed = true; } @Override - public void setPingContextProvider(PingContextProvider contextProvider) { + public void start(PingContextProvider contextProvider) { this.contextProvider = contextProvider; } @@ -501,9 +485,6 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin } private UnicastPingResponse 
handlePingRequest(final UnicastPingRequest request) { - if (!lifecycle.started()) { - throw new IllegalStateException("received ping request while not started"); - } temporalResponses.add(request.pingResponse); threadPool.schedule(TimeValue.timeValueMillis(request.timeout.millis() * 2), ThreadPool.Names.SAME, new Runnable() { @Override diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 46f3a6fb315..90e7d3e2144 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -22,6 +22,7 @@ package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -67,6 +68,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -105,7 +107,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private AllocationService allocationService; private final ClusterName clusterName; private final DiscoverySettings discoverySettings; - private final ZenPingService pingService; + private final ZenPing zenPing; private final MasterFaultDetection masterFD; private final NodesFaultDetection nodesFD; private final PublishClusterStateAction publishClusterState; @@ -137,18 +139,16 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; @Inject - public ZenDiscovery(Settings settings, ThreadPool threadPool, - TransportService transportService, final ClusterService clusterService, ClusterSettings clusterSettings, - ZenPingService pingService) { + public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService, + ClusterService clusterService, ClusterSettings clusterSettings, ZenPing zenPing) { super(settings); this.clusterService = clusterService; this.clusterName = clusterService.getClusterName(); this.transportService = transportService; this.discoverySettings = new DiscoverySettings(settings, clusterSettings); - this.pingService = pingService; + this.zenPing = zenPing; this.electMaster = new ElectMasterService(settings); this.pingTimeout = PING_TIMEOUT_SETTING.get(settings); - this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings); this.joinRetryAttempts = JOIN_RETRY_ATTEMPTS_SETTING.get(settings); this.joinRetryDelay = JOIN_RETRY_DELAY_SETTING.get(settings); @@ -171,7 +171,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterService); this.masterFD.addListener(new MasterNodeFailureListener()); - this.nodesFD = new NodesFaultDetection(settings, threadPool, transportService, clusterService.getClusterName()); this.nodesFD.addListener(new NodeFaultDetectionListener()); @@ -183,9 +182,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover new 
NewPendingClusterStateListener(), discoverySettings, clusterService.getClusterName()); - this.pingService.setPingContextProvider(this); this.membership = new MembershipAction(settings, transportService, this, new MembershipListener()); - this.joinThreadControl = new JoinThreadControl(threadPool); transportService.registerRequestHandler( @@ -201,7 +198,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover protected void doStart() { nodesFD.setLocalNode(clusterService.localNode()); joinThreadControl.start(); - pingService.start(); + zenPing.start(this); this.nodeJoinController = new NodeJoinController(clusterService, allocationService, electMaster, discoverySettings, settings); this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::rejoin, logger); } @@ -233,7 +230,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover @Override protected void doStop() { joinThreadControl.stop(); - pingService.stop(); masterFD.stop("zen disco stop"); nodesFD.stop(); DiscoveryNodes nodes = nodes(); @@ -264,10 +260,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } @Override - protected void doClose() { - masterFD.close(); - nodesFD.close(); - pingService.close(); + protected void doClose() throws IOException { + IOUtils.close(masterFD, nodesFD, zenPing); } @Override @@ -322,6 +316,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover // update the set of nodes to ping after the new cluster state has been published nodesFD.updateNodesAndPing(clusterChangedEvent.state()); + + // clean the pending cluster queue - we are currently master, so any pending cluster state should be failed + // note that we also clean the queue on master failure (see handleMasterGone) but a delayed cluster state publish + // from a stale master can still make it in the queue during the election (but not be committed) + publishClusterState.pendingStatesQueue().failAllStatesAndClear(new ElasticsearchException("elected as master")); } /** @@ -362,6 +361,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return publishClusterState.pendingStatesQueue().pendingClusterStates(); } + PendingClusterStatesQueue pendingClusterStatesQueue() { + return publishClusterState.pendingStatesQueue(); + } + /** * the main function of a join thread. This function is guaranteed to join the cluster * or spawn a new join thread upon failure to do so. 
@@ -862,7 +865,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private DiscoveryNode findMaster() { logger.trace("starting to ping"); - List fullPingResponses = pingService.pingAndWait(pingTimeout).toList(); + List fullPingResponses = pingAndWait(pingTimeout).toList(); if (fullPingResponses == null) { logger.trace("No full ping responses"); return null; @@ -1004,6 +1007,28 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover } } + private ZenPing.PingCollection pingAndWait(TimeValue timeout) { + final ZenPing.PingCollection response = new ZenPing.PingCollection(); + final CountDownLatch latch = new CountDownLatch(1); + try { + zenPing.ping(pings -> { + response.addPings(pings); + latch.countDown(); + }, timeout); + } catch (Exception ex) { + logger.warn("Ping execution failed", ex); + latch.countDown(); + } + + try { + latch.await(); + return response; + } catch (InterruptedException e) { + logger.trace("pingAndWait interrupted"); + return response; + } + } + private class NewPendingClusterStateListener implements PublishClusterStateAction.NewPendingClusterStateListener { @Override diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java index be1df88d334..cb2c8cb5019 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenPing.java @@ -19,15 +19,7 @@ package org.elasticsearch.discovery.zen; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.unit.TimeValue; - +import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -36,11 +28,19 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.unit.TimeValue; + import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; -public interface ZenPing extends LifecycleComponent { +public interface ZenPing extends Closeable { - void setPingContextProvider(PingContextProvider contextProvider); + void start(PingContextProvider contextProvider); void ping(PingListener listener, TimeValue timeout); diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenPingService.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenPingService.java deleted file mode 100644 index 3aa3017f549..00000000000 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenPingService.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.discovery.zen; - -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; - -public class ZenPingService extends AbstractLifecycleComponent { - - private List zenPings = Collections.emptyList(); - - @Inject - public ZenPingService(Settings settings, Set zenPings) { - super(settings); - this.zenPings = Collections.unmodifiableList(new ArrayList<>(zenPings)); - } - - public List zenPings() { - return this.zenPings; - } - - public void setPingContextProvider(PingContextProvider contextProvider) { - if (lifecycle.started()) { - throw new IllegalStateException("Can't set nodes provider when started"); - } - for (ZenPing zenPing : zenPings) { - zenPing.setPingContextProvider(contextProvider); - } - } - - @Override - protected void doStart() { - for (ZenPing zenPing : zenPings) { - zenPing.start(); - } - } - - @Override - protected void doStop() { - for (ZenPing zenPing : zenPings) { - zenPing.stop(); - } - } - - @Override - protected void doClose() { - for (ZenPing zenPing : zenPings) { - zenPing.close(); - } - } - - public ZenPing.PingCollection pingAndWait(TimeValue timeout) { - final ZenPing.PingCollection response = new ZenPing.PingCollection(); - final CountDownLatch latch = new CountDownLatch(zenPings.size()); - for (ZenPing zenPing : zenPings) { - final AtomicBoolean counted = new AtomicBoolean(); - try { - zenPing.ping(pings -> { - response.addPings(pings); - if (counted.compareAndSet(false, true)) { - latch.countDown(); - } - }, timeout); - } catch (Exception ex) { - logger.warn("Ping execution failed", ex); - if (counted.compareAndSet(false, true)) { - latch.countDown(); - } - } - } - try { - latch.await(); - return response; - } catch (InterruptedException e) { - logger.trace("pingAndWait interrupted"); - return response; - } - } -} diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index ffcc7648293..5fc0d1d27cd 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -52,6 +52,7 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import 
org.elasticsearch.index.shard.ShadowIndexShard; @@ -144,7 +145,10 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust this.indexAnalyzers = registry.build(indexSettings); this.similarityService = similarityService; this.mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry, - IndexService.this::newQueryShardContext); + // we parse all percolator queries as they would be parsed on shard 0 + () -> newQueryShardContext(0, null, () -> { + throw new IllegalArgumentException("Percolator queries are not allowed to use the current timestamp"); + })); this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService); this.shardStoreDeleter = shardStoreDeleter; this.bigArrays = bigArrays; @@ -452,7 +456,10 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust /** * Creates a new QueryShardContext. The context has no types set yet; if types are required, set them via - * {@link QueryShardContext#setTypes(String...)} + * {@link QueryShardContext#setTypes(String...)}. + + * Passing a {@code null} {@link IndexReader} will return a valid context, however it won't be able to make + * {@link IndexReader}-specific optimizations, such as rewriting queries that contain range queries. */ public QueryShardContext newQueryShardContext(int shardId, IndexReader indexReader, LongSupplier nowInMillis) { return new QueryShardContext( @@ -463,15 +470,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust nowInMillis); } - /** - * Creates a new QueryShardContext. The context has not types set yet, if types are required set them via - * {@link QueryShardContext#setTypes(String...)}. This context may be used for query parsing but cannot be - * used for rewriting since it does not know about the current {@link IndexReader}. - */ - public QueryShardContext newQueryShardContext() { - return newQueryShardContext(0, null, System::currentTimeMillis); - } - /** * The {@link ThreadPool} to use for this index.
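Review note: with the MapperService wiring above, percolator queries are parsed through a QueryShardContext built for shard 0, with a null IndexReader and a nowInMillis supplier that throws. A hedged usage fragment (the indexService variable and the timestamp field name are assumptions for illustration only):

// Reject any attempt to read the clock while parsing percolator queries.
LongSupplier rejectNow = () -> {
    throw new IllegalArgumentException("Percolator queries are not allowed to use the current timestamp");
};
QueryShardContext context = indexService.newQueryShardContext(0, null, rejectNow);
// A percolator query that resolves "now", e.g. {"range":{"timestamp":{"gte":"now-1h"}}},
// now fails at indexing time instead of silently freezing the timestamp at parse time.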
*/ @@ -692,7 +690,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust if (shard.isRefreshNeeded()) { shard.refresh("schedule"); } - } catch (EngineClosedException | AlreadyClosedException ex) { + } catch (IndexShardClosedException | AlreadyClosedException ex) { // fine - continue; } continue; diff --git a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 513e87878d6..215013bf246 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -133,23 +133,20 @@ public final class IndexingSlowLog implements IndexingOperationListener { this.reformat = reformat; } - @Override - public void postIndex(Engine.Index index, boolean created) { - final long took = index.endTime() - index.startTime(); - postIndexing(index.parsedDoc(), took); - } - - - private void postIndexing(ParsedDocument doc, long tookInNanos) { - if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) { - indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); - } else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) { - indexLogger.info("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); - } else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) { - indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); - } else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) { - indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + public void postIndex(Engine.Index indexOperation, Engine.IndexResult result) { + if (result.hasFailure() == false) { + final ParsedDocument doc = indexOperation.parsedDoc(); + final long tookInNanos = result.getTook(); + if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) { + indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + } else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) { + indexLogger.info("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + } else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) { + indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + } else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) { + indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); + } } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java b/core/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java index f3f806fb831..8cad7823cb4 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/DeleteFailedEngineException.java @@ -24,12 +24,13 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +/** + * Deprecated as not used in 6.0, should be removed in 7.0 + * Still exists for bwc in serializing/deserializing from + * 5.x nodes + */ +@Deprecated public class DeleteFailedEngineException extends EngineException { - - public DeleteFailedEngineException(ShardId 
shardId, Engine.Delete delete, Throwable cause) { - super(shardId, "Delete failed for [" + delete.uid().text() + "]", cause); - } - public DeleteFailedEngineException(StreamInput in) throws IOException{ super(in); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java b/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java index baacc4b240d..b145a86e43d 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java +++ b/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java @@ -48,4 +48,12 @@ class DeleteVersionValue extends VersionValue { public long ramBytesUsed() { return BASE_RAM_BYTES_USED; } + + @Override + public String toString() { + return "DeleteVersionValue{" + + "version=" + version() + ", " + + "time=" + time + + '}'; + } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 17822a71332..0da96def3ef 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -77,6 +77,7 @@ import java.util.Base64; import java.util.Comparator; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.concurrent.TimeUnit; @@ -277,9 +278,135 @@ public abstract class Engine implements Closeable { } } - public abstract void index(Index operation) throws EngineException; + /** + * Perform document index operation on the engine + * @param index operation to perform + * @return {@link IndexResult} containing updated translog location, version and + * document specific failures + * + * Note: engine level failures (i.e. persistent engine failures) are thrown + */ + public abstract IndexResult index(final Index index); - public abstract void delete(Delete delete) throws EngineException; + /** + * Perform document delete operation on the engine + * @param delete operation to perform + * @return {@link DeleteResult} containing updated translog location, version and + * document specific failures + * + * Note: engine level failures (i.e. persistent engine failures) are thrown + */ + public abstract DeleteResult delete(final Delete delete); + + /** + * Base class for index and delete operation results + * Holds result meta data (e.g. 
translog location, updated version) + * for an executed write {@link Operation} + **/ + public abstract static class Result { + private final Operation.TYPE operationType; + private final long version; + private final Exception failure; + private final SetOnce freeze = new SetOnce<>(); + private Translog.Location translogLocation; + private long took; + + protected Result(Operation.TYPE operationType, Exception failure, long version) { + this.operationType = operationType; + this.failure = failure; + this.version = version; + } + + protected Result(Operation.TYPE operationType, long version) { + this(operationType, null, version); + } + + /** whether the operation had failure */ + public boolean hasFailure() { + return failure != null; + } + + /** get the updated document version */ + public long getVersion() { + return version; + } + + /** get the translog location after executing the operation */ + public Translog.Location getTranslogLocation() { + return translogLocation; + } + + /** get document failure while executing the operation {@code null} in case of no failure */ + public Exception getFailure() { + return failure; + } + + /** get total time in nanoseconds */ + public long getTook() { + return took; + } + + public Operation.TYPE getOperationType() { + return operationType; + } + + void setTranslogLocation(Translog.Location translogLocation) { + if (freeze.get() == null) { + assert failure == null : "failure has to be null to set translog location"; + this.translogLocation = translogLocation; + } else { + throw new IllegalStateException("result is already frozen"); + } + } + + void setTook(long took) { + if (freeze.get() == null) { + this.took = took; + } else { + throw new IllegalStateException("result is already frozen"); + } + } + + void freeze() { + freeze.set(true); + } + } + + public static class IndexResult extends Result { + private final boolean created; + + public IndexResult(long version, boolean created) { + super(Operation.TYPE.INDEX, version); + this.created = created; + } + + public IndexResult(Exception failure, long version) { + super(Operation.TYPE.INDEX, failure, version); + this.created = false; + } + + public boolean isCreated() { + return created; + } + } + + public static class DeleteResult extends Result { + private final boolean found; + + public DeleteResult(long version, boolean found) { + super(Operation.TYPE.DELETE, version); + this.found = found; + } + + public DeleteResult(Exception failure, long version) { + super(Operation.TYPE.DELETE, failure, version); + this.found = false; + } + + public boolean isFound() { + return found; + } + } /** * Attempts to do a special commit where the given syncID is put into the commit data. 
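Review note: the setTranslogLocation/setTook guards above lean on Lucene's SetOnce as a freeze flag: mutators succeed until freeze() is called, after which they throw. A self-contained sketch of the same write-once pattern (WriteOnceResult is a hypothetical class, not part of this change):

import org.apache.lucene.util.SetOnce;

final class WriteOnceResult {
    private final SetOnce<Boolean> frozen = new SetOnce<>(); // SetOnce rejects a second set()
    private long took;

    void setTook(long took) {
        if (frozen.get() == null) { // still mutable
            this.took = took;
        } else {
            throw new IllegalStateException("result is already frozen");
        }
    }

    void freeze() {
        frozen.set(true); // from here on every setter throws
    }
}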
The attempt @@ -767,13 +894,27 @@ public abstract class Engine implements Closeable { } public abstract static class Operation { + + /** type of operation (index, delete), subclasses use static types */ + public enum TYPE { + INDEX, DELETE; + + private final String lowercase; + + TYPE() { + this.lowercase = this.toString().toLowerCase(Locale.ROOT); + } + + public String getLowercase() { + return lowercase; + } + } + private final Term uid; - private long version; + private final long version; private final VersionType versionType; private final Origin origin; - private Translog.Location location; private final long startTime; - private long endTime; public Operation(Term uid, long version, VersionType versionType, Origin origin, long startTime) { this.uid = uid; @@ -806,27 +947,7 @@ public abstract class Engine implements Closeable { return this.version; } - public void updateVersion(long version) { - this.version = version; - } - - public void setTranslogLocation(Translog.Location location) { - this.location = location; - } - - public Translog.Location getTranslogLocation() { - return this.location; - } - - public int sizeInBytes() { - if (location != null) { - return location.size; - } else { - return estimatedSizeInBytes(); - } - } - - protected abstract int estimatedSizeInBytes(); + public abstract int estimatedSizeInBytes(); public VersionType versionType() { return this.versionType; @@ -839,20 +960,11 @@ public abstract class Engine implements Closeable { return this.startTime; } - public void endTime(long endTime) { - this.endTime = endTime; - } - - /** - * Returns operation end time in nanoseconds. - */ - public long endTime() { - return this.endTime; - } - - abstract String type(); + public abstract String type(); abstract String id(); + + abstract TYPE operationType(); } public static class Index extends Operation { @@ -860,7 +972,6 @@ public abstract class Engine implements Closeable { private final ParsedDocument doc; private final long autoGeneratedIdTimestamp; private final boolean isRetry; - private boolean created; public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime, long autoGeneratedIdTimestamp, boolean isRetry) { @@ -892,6 +1003,11 @@ public abstract class Engine implements Closeable { return this.doc.id(); } + @Override + TYPE operationType() { + return TYPE.INDEX; + } + public String routing() { return this.doc.routing(); } @@ -904,12 +1020,6 @@ public abstract class Engine implements Closeable { return this.doc.ttl(); } - @Override - public void updateVersion(long version) { - super.updateVersion(version); - this.doc.version().setLongValue(version); - } - public String parent() { return this.doc.parent(); } @@ -922,16 +1032,8 @@ public abstract class Engine implements Closeable { return this.doc.source(); } - public boolean isCreated() { - return created; - } - - public void setCreated(boolean created) { - this.created = created; - } - @Override - protected int estimatedSizeInBytes() { + public int estimatedSizeInBytes() { return (id().length() + type().length()) * 2 + source().length() + 12; } @@ -958,21 +1060,19 @@ public abstract class Engine implements Closeable { private final String type; private final String id; - private boolean found; - public Delete(String type, String id, Term uid, long version, VersionType versionType, Origin origin, long startTime, boolean found) { + public Delete(String type, String id, Term uid, long version, VersionType versionType, Origin origin, long startTime) { super(uid, 
version, versionType, origin, startTime); this.type = type; this.id = id; - this.found = found; } public Delete(String type, String id, Term uid) { - this(type, id, uid, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime(), false); + this(type, id, uid, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime()); } public Delete(Delete template, VersionType versionType) { - this(template.type(), template.id(), template.uid(), template.version(), versionType, template.origin(), template.startTime(), template.found()); + this(template.type(), template.id(), template.uid(), template.version(), versionType, template.origin(), template.startTime()); } @Override @@ -985,20 +1085,15 @@ public abstract class Engine implements Closeable { return this.id; } - public void updateVersion(long version, boolean found) { - updateVersion(version); - this.found = found; - } - - public boolean found() { - return this.found; + @Override + TYPE operationType() { + return TYPE.DELETE; } @Override - protected int estimatedSizeInBytes() { + public int estimatedSizeInBytes() { return (uid().field().length() + uid().text().length()) * 2 + 20; } - } public static class Get { diff --git a/core/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java b/core/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java index bacc786c7dc..cd5e8a47406 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java +++ b/core/src/main/java/org/elasticsearch/index/engine/IndexFailedEngineException.java @@ -26,20 +26,18 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.Objects; +/** + * Deprecated as not used in 6.0, should be removed in 7.0 + * Still exists for bwc in serializing/deserializing from + * 5.x nodes + */ +@Deprecated public class IndexFailedEngineException extends EngineException { private final String type; private final String id; - public IndexFailedEngineException(ShardId shardId, String type, String id, Throwable cause) { - super(shardId, "Index failed for [" + type + "#" + id + "]", cause); - Objects.requireNonNull(type, "type must not be null"); - Objects.requireNonNull(id, "id must not be null"); - this.type = type; - this.id = id; - } - public IndexFailedEngineException(StreamInput in) throws IOException{ super(in); type = in.readString(); diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 3cc1aa26195..36d5f195905 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -35,6 +35,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SearcherFactory; import org.apache.lucene.search.SearcherManager; +import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; @@ -57,6 +58,7 @@ import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; import 
org.elasticsearch.index.merge.OnGoingMerge; @@ -386,46 +388,61 @@ public class InternalEngine extends Engine { return currentVersion; } - private static VersionValueSupplier NEW_VERSION_VALUE = (u, t) -> new VersionValue(u); - - @FunctionalInterface - private interface VersionValueSupplier { - VersionValue apply(long updatedVersion, long time); - } - - private void maybeAddToTranslog( - final T op, - final long updatedVersion, - final Function toTranslogOp, - final VersionValueSupplier toVersionValue) throws IOException { - if (op.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { - final Translog.Location translogLocation = translog.add(toTranslogOp.apply(op)); - op.setTranslogLocation(translogLocation); - } - versionMap.putUnderLock(op.uid().bytes(), toVersionValue.apply(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis())); - - } - @Override - public void index(Index index) { + public IndexResult index(Index index) { + IndexResult result; try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); if (index.origin().isRecovery()) { // Don't throttle recovery operations - innerIndex(index); + result = innerIndex(index); } else { try (Releasable r = throttle.acquireThrottle()) { - innerIndex(index); + result = innerIndex(index); } } - } catch (IllegalStateException | IOException e) { - try { - maybeFailEngine("index", e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - throw new IndexFailedEngineException(shardId, index.type(), index.id(), e); + } catch (Exception e) { + result = new IndexResult(checkIfDocumentFailureOrThrow(index, e), index.version()); } + return result; + } + + /** + * Inspects exception thrown when executing index or delete operations + * + * @return failure if the failure is a document specific failure (e.g. analysis chain failure) + * or throws Exception if the failure caused the engine to fail (e.g. out of disk, lucene tragic event) + * + * Note: pkg-private for testing + */ + final Exception checkIfDocumentFailureOrThrow(final Operation operation, final Exception failure) { + boolean isDocumentFailure; + try { + // When indexing a document into Lucene, Lucene distinguishes between environment related errors + // (like out of disk space) and document specific errors (like analysis chain problems) by setting + // the IndexWriter.getTragicEvent() value for the former. maybeFailEngine checks for these kind of + // errors and returns true if that is the case. We use that to indicate a document level failure + // and set the error in operation.setFailure. 
In case of environment related errors, the failure + * is bubbled up isDocumentFailure = maybeFailEngine(operation.operationType().getLowercase(), failure) == false; + } catch (Exception inner) { + // we failed checking whether the failure can fail the engine, treat it as a persistent engine failure + isDocumentFailure = false; + failure.addSuppressed(inner); + } + if (isDocumentFailure) { + return failure; + } else { + // throw original exception in case the exception caused the engine to fail + rethrow(failure); + return null; + } + } + + // hack to rethrow original exception in case of engine level failures during index/delete operation + @SuppressWarnings("unchecked") + private static <T extends Throwable> void rethrow(Throwable t) throws T { + throw (T) t; + } private boolean canOptimizeAddDocument(Index index) { @@ -452,7 +469,9 @@ public class InternalEngine extends Engine { return false; } - private void innerIndex(Index index) throws IOException { + private IndexResult innerIndex(Index index) throws IOException { + final Translog.Location location; + final long updatedVersion; try (Releasable ignored = acquireLock(index.uid())) { lastWriteNanos = index.startTime(); /* if we have an autoGeneratedID that comes into the engine we can potentially optimize @@ -484,7 +503,8 @@ // if anything is fishy here ie. there is a retry we go and force updateDocument below so we are updating the document in the // lucene index without checking the version map but we still do the version check final boolean forceUpdateDocument; - if (canOptimizeAddDocument(index)) { + final boolean canOptimizeAddDocument = canOptimizeAddDocument(index); + if (canOptimizeAddDocument) { long deOptimizeTimestamp = maxUnsafeAutoIdTimestamp.get(); if (index.isRetry()) { forceUpdateDocument = true; @@ -516,60 +536,81 @@ } } final long expectedVersion = index.version(); + final IndexResult indexResult; if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) { - index.setCreated(false); - return; - } - final long updatedVersion = updateVersion(index, currentVersion, expectedVersion); - index.setCreated(deleted); - if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) { - // document does not exists, we can optimize for create - index(index, indexWriter); + // skip index operation because of version conflict on recovery + indexResult = new IndexResult(expectedVersion, false); } else { - update(index, indexWriter); + updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion); + index.parsedDoc().version().setLongValue(updatedVersion); + if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) { + // document does not exist, we can optimize for create, but double check if assertions are running + assert assertDocDoesNotExist(index, canOptimizeAddDocument == false); + index(index.docs(), indexWriter); + } else { + update(index.uid(), index.docs(), indexWriter); + } + indexResult = new IndexResult(updatedVersion, deleted); + location = index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY + ?
translog.add(new Translog.Index(index, indexResult)) + : null; + versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion)); + indexResult.setTranslogLocation(location); } - maybeAddToTranslog(index, updatedVersion, Translog.Index::new, NEW_VERSION_VALUE); + indexResult.setTook(System.nanoTime() - index.startTime()); + indexResult.freeze(); + return indexResult; } } - private long updateVersion(Engine.Operation op, long currentVersion, long expectedVersion) { - final long updatedVersion = op.versionType().updateVersion(currentVersion, expectedVersion); - op.updateVersion(updatedVersion); - return updatedVersion; - } - - private static void index(final Index index, final IndexWriter indexWriter) throws IOException { - if (index.docs().size() > 1) { - indexWriter.addDocuments(index.docs()); + private static void index(final List docs, final IndexWriter indexWriter) throws IOException { + if (docs.size() > 1) { + indexWriter.addDocuments(docs); } else { - indexWriter.addDocument(index.docs().get(0)); + indexWriter.addDocument(docs.get(0)); } } - private static void update(final Index index, final IndexWriter indexWriter) throws IOException { - if (index.docs().size() > 1) { - indexWriter.updateDocuments(index.uid(), index.docs()); + /** + * Asserts that the doc in the index operation really doesn't exist + */ + private boolean assertDocDoesNotExist(final Index index, final boolean allowDeleted) throws IOException { + final VersionValue versionValue = versionMap.getUnderLock(index.uid()); + if (versionValue != null) { + if (versionValue.delete() == false || allowDeleted == false) { + throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists in version map (version " + versionValue + ")"); + } } else { - indexWriter.updateDocument(index.uid(), index.docs().get(0)); + try (final Searcher searcher = acquireSearcher("assert doc doesn't exist")) { + final long docsWithId = searcher.searcher().count(new TermQuery(index.uid())); + if (docsWithId > 0) { + throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists [" + docsWithId + "] times in index"); + } + } + } + return true; + } + + private static void update(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException { + if (docs.size() > 1) { + indexWriter.updateDocuments(uid, docs); + } else { + indexWriter.updateDocument(uid, docs.get(0)); } } @Override - public void delete(Delete delete) throws EngineException { + public DeleteResult delete(Delete delete) { + DeleteResult result; try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments: - innerDelete(delete); - } catch (IllegalStateException | IOException e) { - try { - maybeFailEngine("delete", e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - throw new DeleteFailedEngineException(shardId, delete, e); + result = innerDelete(delete); + } catch (Exception e) { + result = new DeleteResult(checkIfDocumentFailureOrThrow(delete, e), delete.version()); } - maybePruneDeletedTombstones(); + return result; } private void maybePruneDeletedTombstones() { @@ -580,7 +621,10 @@ public class InternalEngine extends Engine { } } - private void innerDelete(Delete delete) throws IOException { + private DeleteResult innerDelete(Delete delete) throws IOException { + final Translog.Location location; + final long updatedVersion; + final boolean found; try (Releasable ignored = acquireLock(delete.uid())) { 
lastWriteNanos = delete.startTime(); final long currentVersion; @@ -596,19 +640,28 @@ public class InternalEngine extends Engine { } final long expectedVersion = delete.version(); - if (checkVersionConflict(delete, currentVersion, expectedVersion, deleted)) return; - - final long updatedVersion = updateVersion(delete, currentVersion, expectedVersion); - - final boolean found = deleteIfFound(delete, currentVersion, deleted, versionValue); - - delete.updateVersion(updatedVersion, found); - - maybeAddToTranslog(delete, updatedVersion, Translog.Delete::new, DeleteVersionValue::new); + final DeleteResult deleteResult; + if (checkVersionConflict(delete, currentVersion, expectedVersion, deleted)) { + // skip executing delete because of version conflict on recovery + deleteResult = new DeleteResult(expectedVersion, true); + } else { + updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion); + found = deleteIfFound(delete.uid(), currentVersion, deleted, versionValue); + deleteResult = new DeleteResult(updatedVersion, found); + location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY + ? translog.add(new Translog.Delete(delete, deleteResult)) + : null; + versionMap.putUnderLock(delete.uid().bytes(), + new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis())); + deleteResult.setTranslogLocation(location); + } + deleteResult.setTook(System.nanoTime() - delete.startTime()); + deleteResult.freeze(); + return deleteResult; } } - private boolean deleteIfFound(Delete delete, long currentVersion, boolean deleted, VersionValue versionValue) throws IOException { + private boolean deleteIfFound(Term uid, long currentVersion, boolean deleted, VersionValue versionValue) throws IOException { final boolean found; if (currentVersion == Versions.NOT_FOUND) { // doc does not exist and no prior deletes @@ -618,7 +671,7 @@ public class InternalEngine extends Engine { found = false; } else { // we deleted a currently existing document - indexWriter.deleteDocuments(delete.uid()); + indexWriter.deleteDocuments(uid); found = true; } return found; @@ -1086,7 +1139,8 @@ public class InternalEngine extends Engine { } } - private IndexWriter createWriter(boolean create) throws IOException { + // pkg-private for testing + IndexWriter createWriter(boolean create) throws IOException { try { final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer()); iwc.setCommitOnClose(false); // we by default don't commit on close diff --git a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 3aafcaff748..d84f03e83dd 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -106,12 +106,12 @@ public class ShadowEngine extends Engine { @Override - public void index(Index index) throws EngineException { + public IndexResult index(Index index) { throw new UnsupportedOperationException(shardId + " index operation not allowed on shadow engine"); } @Override - public void delete(Delete delete) throws EngineException { + public DeleteResult delete(Delete delete) { throw new UnsupportedOperationException(shardId + " delete operation not allowed on shadow engine"); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java b/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java index 662c88df5d9..5258b270091 100644 --- 
a/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java +++ b/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java @@ -57,4 +57,11 @@ class VersionValue implements Accountable { public Collection getChildResources() { return Collections.emptyList(); } + + @Override + public String toString() { + return "VersionValue{" + + "version=" + version + + '}'; + } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java index 2cdeed9f040..50c7d98be92 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java @@ -69,7 +69,7 @@ public class DocumentMapperParser { } public Mapper.TypeParser.ParserContext parserContext(String type) { - return new Mapper.TypeParser.ParserContext(type, indexAnalyzers, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier.get()); + return new Mapper.TypeParser.ParserContext(type, indexAnalyzers, similarityService::getSimilarity, mapperService, typeParsers::get, indexVersionCreated, parseFieldMatcher, queryShardContextSupplier); } public DocumentMapper parse(@Nullable String type, CompressedXContent source) throws MapperParsingException { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 1c54c2136c9..83a20e03ffe 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.Version; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; @@ -31,6 +30,7 @@ import org.elasticsearch.index.similarity.SimilarityProvider; import java.util.Map; import java.util.Objects; import java.util.function.Function; +import java.util.function.Supplier; public abstract class Mapper implements ToXContent, Iterable { @@ -93,11 +93,13 @@ public abstract class Mapper implements ToXContent, Iterable { private final ParseFieldMatcher parseFieldMatcher; - private final QueryShardContext queryShardContext; + private final Supplier queryShardContextSupplier; + private QueryShardContext queryShardContext; public ParserContext(String type, IndexAnalyzers indexAnalyzers, Function similarityLookupService, MapperService mapperService, Function typeParsers, - Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher, QueryShardContext queryShardContext) { + Version indexVersionCreated, ParseFieldMatcher parseFieldMatcher, + Supplier queryShardContextSupplier) { this.type = type; this.indexAnalyzers = indexAnalyzers; this.similarityLookupService = similarityLookupService; @@ -105,7 +107,7 @@ public abstract class Mapper implements ToXContent, Iterable { this.typeParsers = typeParsers; this.indexVersionCreated = indexVersionCreated; this.parseFieldMatcher = parseFieldMatcher; - this.queryShardContext = queryShardContext; + this.queryShardContextSupplier = queryShardContextSupplier; } public String type() { @@ -137,6 +139,10 @@ public abstract class Mapper implements ToXContent, Iterable { } public QueryShardContext queryShardContext() { + // No need for synchronization, this class 
must be used in a single thread + if (queryShardContext == null) { + queryShardContext = queryShardContextSupplier.get(); + } return queryShardContext; } @@ -155,7 +161,7 @@ public abstract class Mapper implements ToXContent, Iterable { static class MultiFieldParserContext extends ParserContext { MultiFieldParserContext(ParserContext in) { - super(in.type(), in.indexAnalyzers, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in.queryShardContext()); + super(in.type(), in.indexAnalyzers, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher(), in::queryShardContext); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 3e5262f12fa..799c1baedbf 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -54,7 +54,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Function; -import java.util.function.LongSupplier; import java.util.function.Supplier; import java.util.stream.Collectors; diff --git a/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java index c75ba1fda99..ef94ff16cf8 100644 --- a/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java @@ -28,6 +28,8 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -49,6 +51,8 @@ import java.util.Optional; public class FuzzyQueryBuilder extends AbstractQueryBuilder implements MultiTermQueryBuilder { public static final String NAME = "fuzzy"; + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(FuzzyQueryBuilder.class)); + /** Default maximum edit distance. Defaults to AUTO. */ public static final Fuzziness DEFAULT_FUZZINESS = Fuzziness.AUTO; @@ -151,6 +155,7 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder i * @param value The value of the term */ public FuzzyQueryBuilder(String fieldName, Object value) { + DEPRECATION_LOGGER.deprecated("{} query is deprecated. Instead use the [match] query with fuzziness parameter", NAME); if (Strings.isEmpty(fieldName)) { throw new IllegalArgumentException("field name cannot be null or empty"); } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index ccca0af652b..b569c36ed8f 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -67,7 +67,7 @@ public class QueryRewriteContext implements ParseFieldMatcherSupplier { /** * Returns a client to fetch resources from local or remote nodes.
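Review note: the Mapper.TypeParser.ParserContext change above replaces an eagerly created QueryShardContext with a Supplier that is consulted only when a type parser actually asks for the context, memoizing the result. A stripped-down sketch of that single-threaded lazy pattern (hypothetical names, not the actual class):

import java.util.function.Supplier;

final class Lazy<T> {
    private final Supplier<T> supplier;
    private T value;

    Lazy(Supplier<T> supplier) {
        this.supplier = supplier;
    }

    // No synchronization: like ParserContext, instances are confined to one thread.
    T get() {
        if (value == null) {
            value = supplier.get();
        }
        return value;
    }
}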
*/ - public final Client getClient() { + public Client getClient() { return client; } @@ -86,7 +86,9 @@ public class QueryRewriteContext implements ParseFieldMatcherSupplier { return mapperService; } - /** Return the current {@link IndexReader}, or {@code null} if we are on the coordinating node. */ + /** Return the current {@link IndexReader}, or {@code null} if no index reader is available, for + * instance if we are on the coordinating node or if this rewrite context is used to index + * queries (percolation). */ public IndexReader getIndexReader() { return reader; } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index 60befb9577a..4ba49e5f0e9 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -421,4 +421,9 @@ public class QueryShardContext extends QueryRewriteContext { return super.nowInMillis(); } + @Override + public Client getClient() { + failIfFrozen(); // if somebody uses a terms filter with lookup, for instance, the query can't be cached... + return super.getClient(); + } } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index 807343237d2..2867169ecbe 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -59,9 +60,10 @@ import java.util.TreeMap; public class QueryStringQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "query_string"; + public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099); + public static final boolean DEFAULT_AUTO_GENERATE_PHRASE_QUERIES = false; public static final int DEFAULT_MAX_DETERMINED_STATES = Operations.DEFAULT_MAX_DETERMINIZED_STATES; - public static final boolean DEFAULT_LOWERCASE_EXPANDED_TERMS = true; public static final boolean DEFAULT_ENABLE_POSITION_INCREMENTS = true; public static final boolean DEFAULT_ESCAPE = false; public static final boolean DEFAULT_USE_DIS_MAX = true; @@ -71,7 +73,7 @@ public class QueryStringQueryBuildertrue. - */ - public QueryStringQueryBuilder lowercaseExpandedTerms(boolean lowercaseExpandedTerms) { - this.lowercaseExpandedTerms = lowercaseExpandedTerms; - return this; - } - - public boolean lowercaseExpandedTerms() { - return this.lowercaseExpandedTerms; - } - /** * Set to true to enable position increments in result query. Defaults to * true. @@ -473,6 +478,11 @@ public class QueryStringQueryBuildertrue to enable analysis on wildcard and prefix queries. */ @@ -485,11 +495,6 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder{@value #DEFAULT_SPLIT_ON_WHITESPACE}.
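Review note: the V_5_1_0_UNRELEASED constant introduced above exists so the new splitOnWhitespace flag can be version-gated on the wire. The stream handling itself is not visible in this excerpt, so the following is only a sketch of the usual pattern, not the actual serialization code of this class:

// Hedged sketch: only exchange the new field with nodes that understand it.
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
    // ... pre-existing fields are written unconditionally ...
    if (out.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
        out.writeBoolean(splitOnWhitespace); // older nodes never see the new field
    }
}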
+ */ + public QueryStringQueryBuilder splitOnWhitespace(boolean value) { + this.splitOnWhitespace = value; + return this; + } + + public boolean splitOnWhitespace() { + return splitOnWhitespace; + } + @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(NAME); @@ -597,7 +606,6 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder fieldsAndWeights = new HashMap<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -707,7 +714,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder entry : weights.entrySet()) { + final String fieldName = entry.getKey(); try { - Query query = new FuzzyQuery(new Term(entry.getKey(), text), fuzziness); + final BytesRef term = getAnalyzer().normalize(fieldName, text); + Query query = new FuzzyQuery(new Term(fieldName, term), fuzziness); bq.add(wrapWithBoost(query, entry.getValue()), BooleanClause.Occur.SHOULD); } catch (RuntimeException e) { rethrowUnlessLenient(e); @@ -120,9 +119,18 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp bq.setDisableCoord(true); for (Map.Entry entry : weights.entrySet()) { try { - Query q = createPhraseQuery(entry.getKey(), text, slop); + String field = entry.getKey(); + if (settings.quoteFieldSuffix() != null) { + String quoteField = field + settings.quoteFieldSuffix(); + MappedFieldType quotedFieldType = context.fieldMapper(quoteField); + if (quotedFieldType != null) { + field = quoteField; + } + } + Float boost = entry.getValue(); + Query q = createPhraseQuery(field, text, slop); if (q != null) { - bq.add(wrapWithBoost(q, entry.getValue()), BooleanClause.Occur.SHOULD); + bq.add(wrapWithBoost(q, boost), BooleanClause.Occur.SHOULD); } } catch (RuntimeException e) { rethrowUnlessLenient(e); @@ -137,20 +145,19 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp */ @Override public Query newPrefixQuery(String text) { - if (settings.lowercaseExpandedTerms()) { - text = text.toLowerCase(settings.locale()); - } BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.setDisableCoord(true); for (Map.Entry entry : weights.entrySet()) { + final String fieldName = entry.getKey(); try { if (settings.analyzeWildcard()) { - Query analyzedQuery = newPossiblyAnalyzedQuery(entry.getKey(), text); + Query analyzedQuery = newPossiblyAnalyzedQuery(fieldName, text); if (analyzedQuery != null) { bq.add(wrapWithBoost(analyzedQuery, entry.getValue()), BooleanClause.Occur.SHOULD); } } else { - Query query = new PrefixQuery(new Term(entry.getKey(), text)); + Term term = new Term(fieldName, getAnalyzer().normalize(fieldName, text)); + Query query = new PrefixQuery(term); bq.add(wrapWithBoost(query, entry.getValue()), BooleanClause.Occur.SHOULD); } } catch (RuntimeException e) { @@ -173,11 +180,11 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp * of {@code TermQuery}s and {@code PrefixQuery}s */ private Query newPossiblyAnalyzedQuery(String field, String termStr) { - List> tlist = new ArrayList<> (); + List> tlist = new ArrayList<> (); // get Analyzer from superclass and tokenize the term try (TokenStream source = getAnalyzer().tokenStream(field, termStr)) { source.reset(); - List currentPos = new ArrayList<>(); + List currentPos = new ArrayList<>(); CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class); PositionIncrementAttribute posAtt = 
source.addAttribute(PositionIncrementAttribute.class); @@ -188,7 +195,8 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp tlist.add(currentPos); currentPos = new ArrayList<>(); } - currentPos.add(termAtt.toString()); + final BytesRef term = getAnalyzer().normalize(field, termAtt.toString()); + currentPos.add(term); hasMoreTokens = source.incrementToken(); } if (currentPos.isEmpty() == false) { @@ -214,7 +222,7 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp // build a boolean query with prefix on the last position only. BooleanQuery.Builder builder = new BooleanQuery.Builder(); for (int pos = 0; pos < tlist.size(); pos++) { - List plist = tlist.get(pos); + List plist = tlist.get(pos); boolean isLastPos = (pos == tlist.size()-1); Query posQuery; if (plist.size() == 1) { @@ -232,7 +240,7 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp posQuery = new SynonymQuery(terms); } else { BooleanQuery.Builder innerBuilder = new BooleanQuery.Builder(); - for (String token : plist) { + for (BytesRef token : plist) { innerBuilder.add(new BooleanClause(new PrefixQuery(new Term(field, token)), BooleanClause.Occur.SHOULD)); } @@ -248,14 +256,12 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp * their default values */ static class Settings { - /** Locale to use for parsing. */ - private Locale locale = SimpleQueryStringBuilder.DEFAULT_LOCALE; - /** Specifies whether parsed terms should be lowercased. */ - private boolean lowercaseExpandedTerms = SimpleQueryStringBuilder.DEFAULT_LOWERCASE_EXPANDED_TERMS; /** Specifies whether lenient query parsing should be used. */ private boolean lenient = SimpleQueryStringBuilder.DEFAULT_LENIENT; /** Specifies whether wildcards should be analyzed. */ private boolean analyzeWildcard = SimpleQueryStringBuilder.DEFAULT_ANALYZE_WILDCARD; + /** Specifies a suffix, if any, to apply to field names for phrase matching. */ + private String quoteFieldSuffix = null; /** * Generates default {@link Settings} object (uses ROOT locale, does @@ -264,36 +270,6 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp public Settings() { } - public Settings(Locale locale, Boolean lowercaseExpandedTerms, Boolean lenient, Boolean analyzeWildcard) { - this.locale = locale; - this.lowercaseExpandedTerms = lowercaseExpandedTerms; - this.lenient = lenient; - this.analyzeWildcard = analyzeWildcard; - } - - /** Specifies the locale to use for parsing, Locale.ROOT by default. */ - public void locale(Locale locale) { - this.locale = (locale != null) ? locale : SimpleQueryStringBuilder.DEFAULT_LOCALE; - } - - /** Returns the locale to use for parsing. */ - public Locale locale() { - return this.locale; - } - - /** - * Specifies whether to lowercase parse terms, defaults to true if - * unset. - */ - public void lowercaseExpandedTerms(boolean lowercaseExpandedTerms) { - this.lowercaseExpandedTerms = lowercaseExpandedTerms; - } - - /** Returns whether to lowercase parse terms. */ - public boolean lowercaseExpandedTerms() { - return this.lowercaseExpandedTerms; - } - /** Specifies whether to use lenient parsing, defaults to false. */ public void lenient(boolean lenient) { this.lenient = lenient; @@ -314,12 +290,24 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp return analyzeWildcard; } + /** + * Set the suffix to append to field names for phrase matching. 
+ */ + public void quoteFieldSuffix(String suffix) { + this.quoteFieldSuffix = suffix; + } + + /** + * Return the suffix to append for phrase matching, or {@code null} if + * no suffix should be appended. + */ + public String quoteFieldSuffix() { + return quoteFieldSuffix; + } + @Override public int hashCode() { - // checking the return value of toLanguageTag() for locales only. - // For further reasoning see - // https://issues.apache.org/jira/browse/LUCENE-4021 - return Objects.hash(locale.toLanguageTag(), lowercaseExpandedTerms, lenient, analyzeWildcard); + return Objects.hash(lenient, analyzeWildcard, quoteFieldSuffix); } @Override @@ -331,14 +319,8 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp return false; } Settings other = (Settings) obj; - - // checking the return value of toLanguageTag() for locales only. - // For further reasoning see - // https://issues.apache.org/jira/browse/LUCENE-4021 - return (Objects.equals(locale.toLanguageTag(), other.locale.toLanguageTag()) - && Objects.equals(lowercaseExpandedTerms, other.lowercaseExpandedTerms) - && Objects.equals(lenient, other.lenient) - && Objects.equals(analyzeWildcard, other.analyzeWildcard)); + return Objects.equals(lenient, other.lenient) && Objects.equals(analyzeWildcard, other.analyzeWildcard) + && Objects.equals(quoteFieldSuffix, other.quoteFieldSuffix); } } } diff --git a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index fbe5964f2a0..fd297075067 100644 --- a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -78,10 +79,6 @@ import java.util.TreeMap; * > online documentation. 
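Review note: across SimpleQueryParser above, the removed locale/lowercaseExpandedTerms handling is replaced by getAnalyzer().normalize(field, text), which runs the field analyzer's own normalization chain (lowercasing, folding, and so on) over a single term without tokenizing it, so expanded terms such as prefix and fuzzy match the indexed form per field rather than obeying one global lowercase flag. A small self-contained illustration, assuming a StandardAnalyzer whose normalization chain lowercases:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;

public class NormalizeSketch {
    public static void main(String[] args) {
        Analyzer analyzer = new StandardAnalyzer();
        // normalize() applies the normalization chain to one term, without tokenization
        BytesRef normalized = analyzer.normalize("body", "QUICK"); // -> "quick"
        Query prefix = new PrefixQuery(new Term("body", normalized));
        System.out.println(prefix); // body:quick*
    }
}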
*/ public class SimpleQueryStringBuilder extends AbstractQueryBuilder { - /** Default locale used for parsing.*/ - public static final Locale DEFAULT_LOCALE = Locale.ROOT; - /** Default for lowercasing parsed terms.*/ - public static final boolean DEFAULT_LOWERCASE_EXPANDED_TERMS = true; /** Default for using lenient query parsing.*/ public static final boolean DEFAULT_LENIENT = false; /** Default for wildcard analysis.*/ @@ -94,16 +91,21 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e); } @@ -129,11 +139,11 @@ public interface IndexingOperationListener { } @Override - public void postDelete(Engine.Delete delete) { + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { assert delete != null; for (IndexingOperationListener listener : listeners) { try { - listener.postDelete(delete); + listener.postDelete(delete, result); } catch (Exception e) { logger.warn((Supplier) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java b/core/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java index f62b8f7fe3c..32868a7368a 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java +++ b/core/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java @@ -74,14 +74,18 @@ final class InternalIndexingStats implements IndexingOperationListener { } @Override - public void postIndex(Engine.Index index, boolean created) { - if (!index.origin().isRecovery()) { - long took = index.endTime() - index.startTime(); - totalStats.indexMetric.inc(took); - totalStats.indexCurrent.dec(); - StatsHolder typeStats = typeStats(index.type()); - typeStats.indexMetric.inc(took); - typeStats.indexCurrent.dec(); + public void postIndex(Engine.Index index, Engine.IndexResult result) { + if (result.hasFailure() == false) { + if (!index.origin().isRecovery()) { + long took = result.getTook(); + totalStats.indexMetric.inc(took); + totalStats.indexCurrent.dec(); + StatsHolder typeStats = typeStats(index.type()); + typeStats.indexMetric.inc(took); + typeStats.indexCurrent.dec(); + } + } else { + postIndex(index, result.getFailure()); } } @@ -106,14 +110,18 @@ final class InternalIndexingStats implements IndexingOperationListener { } @Override - public void postDelete(Engine.Delete delete) { - if (!delete.origin().isRecovery()) { - long took = delete.endTime() - delete.startTime(); - totalStats.deleteMetric.inc(took); - totalStats.deleteCurrent.dec(); - StatsHolder typeStats = typeStats(delete.type()); - typeStats.deleteMetric.inc(took); - typeStats.deleteCurrent.dec(); + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { + if (result.hasFailure() == false) { + if (!delete.origin().isRecovery()) { + long took = result.getTook(); + totalStats.deleteMetric.inc(took); + totalStats.deleteCurrent.dec(); + StatsHolder typeStats = typeStats(delete.type()); + typeStats.deleteMetric.inc(took); + typeStats.deleteCurrent.dec(); + } + } else { + postDelete(delete, result.getFailure()); } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index 64ae0c77007..5e5d2a84131 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ 
b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -170,7 +170,7 @@ public class TranslogRecoveryPerformer { logger.trace("[translog] recover [delete] op of [{}][{}]", uid.type(), uid.id()); } final Engine.Delete engineDelete = new Engine.Delete(uid.type(), uid.id(), delete.uid(), delete.version(), - delete.versionType().versionTypeForReplicationAndRecovery(), origin, System.nanoTime(), false); + delete.versionType().versionTypeForReplicationAndRecovery(), origin, System.nanoTime()); delete(engine, engineDelete); break; default: diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 056716a29bd..9cf60dbe422 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -830,13 +830,13 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } - public Index(Engine.Index index) { + public Index(Engine.Index index, Engine.IndexResult indexResult) { this.id = index.id(); this.type = index.type(); this.source = index.source(); this.routing = index.routing(); this.parent = index.parent(); - this.version = index.version(); + this.version = indexResult.getVersion(); this.timestamp = index.timestamp(); this.ttl = index.ttl(); this.versionType = index.versionType(); @@ -994,9 +994,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC assert versionType.validateVersionForWrites(this.version); } - public Delete(Engine.Delete delete) { + public Delete(Engine.Delete delete, Engine.DeleteResult deleteResult) { this.uid = delete.uid(); - this.version = delete.version(); + this.version = deleteResult.getVersion(); this.versionType = delete.versionType(); } diff --git a/core/src/main/java/org/elasticsearch/indices/AbstractIndexShardCacheEntity.java b/core/src/main/java/org/elasticsearch/indices/AbstractIndexShardCacheEntity.java index c0d929d82f5..98afd8781b4 100644 --- a/core/src/main/java/org/elasticsearch/indices/AbstractIndexShardCacheEntity.java +++ b/core/src/main/java/org/elasticsearch/indices/AbstractIndexShardCacheEntity.java @@ -19,40 +19,15 @@ package org.elasticsearch.indices; -import org.apache.lucene.index.DirectoryReader; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.cache.RemovalNotification; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.cache.request.ShardRequestCache; import org.elasticsearch.index.shard.IndexShard; -import java.io.IOException; - /** * Abstract base class for the an {@link IndexShard} level {@linkplain IndicesRequestCache.CacheEntity}. */ abstract class AbstractIndexShardCacheEntity implements IndicesRequestCache.CacheEntity { - @FunctionalInterface - public interface Loader { - void load(StreamOutput out) throws IOException; - } - - private final Loader loader; - private boolean loadedFromCache = true; - - protected AbstractIndexShardCacheEntity(Loader loader) { - this.loader = loader; - } - - /** - * When called after passing this through - * {@link IndicesRequestCache#getOrCompute(IndicesRequestCache.CacheEntity, DirectoryReader, BytesReference)} this will return whether - * or not the result was loaded from the cache. 
- */ - public final boolean loadedFromCache() { - return loadedFromCache; - } /** * Get the {@linkplain ShardRequestCache} used to track cache statistics. @@ -60,27 +35,7 @@ abstract class AbstractIndexShardCacheEntity implements IndicesRequestCache.Cach protected abstract ShardRequestCache stats(); @Override - public final IndicesRequestCache.Value loadValue() throws IOException { - /* BytesStreamOutput allows to pass the expected size but by default uses - * BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result ie. - * a date histogram with 3 buckets is ~100byte so 16k might be very wasteful - * since we don't shrink to the actual size once we are done serializing. - * By passing 512 as the expected size we will resize the byte array in the stream - * slowly until we hit the page size and don't waste too much memory for small query - * results.*/ - final int expectedSizeInBytes = 512; - try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) { - loader.load(out); - // for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep - // the memory properly paged instead of having varied sized bytes - final BytesReference reference = out.bytes(); - loadedFromCache = false; - return new IndicesRequestCache.Value(reference, out.ramBytesUsed()); - } - } - - @Override - public final void onCached(IndicesRequestCache.Key key, IndicesRequestCache.Value value) { + public final void onCached(IndicesRequestCache.Key key, BytesReference value) { stats().onCached(key, value); } @@ -95,7 +50,7 @@ abstract class AbstractIndexShardCacheEntity implements IndicesRequestCache.Cach } @Override - public final void onRemoval(RemovalNotification<IndicesRequestCache.Key, IndicesRequestCache.Value> notification) { + public final void onRemoval(RemovalNotification<IndicesRequestCache.Key, BytesReference> notification) { stats().onRemoval(notification.getKey(), notification.getValue(), notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index 3b4258a8bdf..11dbfb36f4f 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -189,11 +189,6 @@ public class IndexingMemoryController extends AbstractComponent implements Index statusChecker.run(); } - /** called by IndexShard to record that this many bytes were written to translog */ - public void bytesWritten(int bytes) { - statusChecker.bytesWritten(bytes); - } - /** Asks this shard to throttle indexing to one thread */ protected void activateThrottling(IndexShard shard) { shard.activateThrottling(); @@ -205,17 +200,20 @@ public class IndexingMemoryController extends AbstractComponent implements Index } @Override - public void postIndex(Engine.Index index, boolean created) { - recordOperationBytes(index); + public void postIndex(Engine.Index index, Engine.IndexResult result) { + recordOperationBytes(index, result); } @Override - public void postDelete(Engine.Delete delete) { - recordOperationBytes(delete); + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { + recordOperationBytes(delete, result); } - private void recordOperationBytes(Engine.Operation op) { - bytesWritten(op.sizeInBytes()); + /** called by IndexShard to record estimated bytes written to translog for the operation */ + private void recordOperationBytes(Engine.Operation operation, Engine.Result
result) { + if (result.hasFailure() == false) { + statusChecker.bytesWritten(operation.estimatedSizeInBytes()); + } } private static final class ShardAndBytesUsed implements Comparable<ShardAndBytesUsed> { diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java index a08f9ca1ad4..0fcda5c8fd5 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -41,12 +41,12 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import java.io.Closeable; -import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.Set; import java.util.concurrent.ConcurrentMap; +import java.util.function.Supplier; /** * The indices request cache allows to cache a shard level request stage responses, helping with improving @@ -62,7 +62,7 @@ import java.util.concurrent.ConcurrentMap; * is functional. */ public final class IndicesRequestCache extends AbstractComponent implements RemovalListener<IndicesRequestCache.Key, - IndicesRequestCache.Value>, Closeable { + BytesReference>, Closeable { /** * A setting to enable or disable request caching on an index level. Its dynamic by default @@ -79,14 +79,14 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo private final Set<CleanupKey> keysToClean = ConcurrentCollections.newConcurrentSet(); private final ByteSizeValue size; private final TimeValue expire; - private final Cache<Key, Value> cache; + private final Cache<Key, BytesReference> cache; IndicesRequestCache(Settings settings) { super(settings); this.size = INDICES_CACHE_QUERY_SIZE.get(settings); this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ?
INDICES_CACHE_QUERY_EXPIRE.get(settings) : null; long sizeInBytes = size.getBytes(); - CacheBuilder<Key, Value> cacheBuilder = CacheBuilder.<Key, Value>builder() + CacheBuilder<Key, BytesReference> cacheBuilder = CacheBuilder.<Key, BytesReference>builder() .setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this); if (expire != null) { cacheBuilder.setExpireAfterAccess(expire); @@ -105,15 +105,16 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo } @Override - public void onRemoval(RemovalNotification<Key, Value> notification) { + public void onRemoval(RemovalNotification<Key, BytesReference> notification) { notification.getKey().entity.onRemoval(notification); } - BytesReference getOrCompute(CacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) throws Exception { + BytesReference getOrCompute(CacheEntity cacheEntity, Supplier<BytesReference> loader, + DirectoryReader reader, BytesReference cacheKey) throws Exception { final Key key = new Key(cacheEntity, reader.getVersion(), cacheKey); - Loader loader = new Loader(cacheEntity); - Value value = cache.computeIfAbsent(key, loader); - if (loader.isLoaded()) { + Loader cacheLoader = new Loader(cacheEntity, loader); + BytesReference value = cache.computeIfAbsent(key, cacheLoader); + if (cacheLoader.isLoaded()) { key.entity.onMiss(); // see if its the first time we see this reader, and make sure to register a cleanup key CleanupKey cleanupKey = new CleanupKey(cacheEntity, reader.getVersion()); @@ -126,16 +127,18 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo } else { key.entity.onHit(); } - return value.reference; + return value; } - private static class Loader implements CacheLoader<Key, Value> { + private static class Loader implements CacheLoader<Key, BytesReference> { private final CacheEntity entity; + private final Supplier<BytesReference> loader; private boolean loaded; - Loader(CacheEntity entity) { + Loader(CacheEntity entity, Supplier<BytesReference> loader) { this.entity = entity; + this.loader = loader; } public boolean isLoaded() { @@ -143,8 +146,8 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo } @Override - public Value load(Key key) throws Exception { - Value value = entity.loadValue(); + public BytesReference load(Key key) throws Exception { + BytesReference value = loader.get(); entity.onCached(key, value); loaded = true; return value; @@ -154,16 +157,12 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo /** * Basic interface to make this cache testable. */ - interface CacheEntity { - /** - * Loads the actual cache value. this is the heavy lifting part. - */ - Value loadValue() throws IOException; + interface CacheEntity extends Accountable { /** - * Called after the value was loaded via {@link #loadValue()} + * Called after the value was loaded. */ - void onCached(Key key, Value value); + void onCached(Key key, BytesReference value); /** * Returns <code>true</code> iff the resource behind this entity is still open ie.
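The Loader change above is the heart of this refactoring: instead of the cache entity knowing how to serialize itself via loadValue(), the caller now hands getOrCompute a Supplier<BytesReference>, and the Loader records whether it actually ran, which is how hits and misses are told apart. Below is a minimal, self-contained sketch of that hit/miss protocol, using invented names (ComputeIfAbsentCache, TrackingLoader) rather than the real Elasticsearch classes:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

class ComputeIfAbsentCache<K, V> {
    private final Map<K, V> map = new HashMap<>();

    V computeIfAbsent(K key, TrackingLoader<K, V> loader) {
        // The loader is only invoked when the key is absent.
        return map.computeIfAbsent(key, loader::load);
    }

    /** Wraps the value supplier and records whether it was actually invoked. */
    static class TrackingLoader<K, V> {
        private final Supplier<V> supplier;
        private boolean loaded = false;

        TrackingLoader(Supplier<V> supplier) {
            this.supplier = supplier;
        }

        V load(K key) {
            loaded = true; // only reached on a cache miss
            return supplier.get();
        }

        /** True iff the supplier ran, i.e. the lookup was a miss. */
        boolean isLoaded() {
            return loaded;
        }
    }
}
```

After computeIfAbsent returns, isLoaded() == true means the value was just produced (count a miss, and in the real code also register a cleanup key for the reader), while false means it was served from the cache (count a hit).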
@@ -190,32 +189,12 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo /** * Called when this entity instance is removed */ - void onRemoval(RemovalNotification<Key, Value> notification); - } - - - - static class Value implements Accountable { - final BytesReference reference; - final long ramBytesUsed; - - Value(BytesReference reference, long ramBytesUsed) { - this.reference = reference; - this.ramBytesUsed = ramBytesUsed; - } - - @Override - public long ramBytesUsed() { - return ramBytesUsed; - } - - @Override - public Collection<Accountable> getChildResources() { - return Collections.emptyList(); - } + void onRemoval(RemovalNotification<Key, BytesReference> notification); } static class Key implements Accountable { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Key.class); + public final CacheEntity entity; // use as identity equality public final long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped public final BytesReference value; @@ -228,7 +207,7 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo @Override public long ramBytesUsed() { - return RamUsageEstimator.NUM_BYTES_OBJECT_REF + Long.BYTES + value.length(); + return BASE_RAM_BYTES_USED + entity.ramBytesUsed() + value.length(); } @Override diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 4c7e541aafa..0d4bacefb93 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -23,11 +23,11 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; @@ -51,9 +51,11 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -98,7 +100,6 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreConfig; -import org.elasticsearch.indices.AbstractIndexShardCacheEntity.Loader; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
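The new Key.ramBytesUsed() above replaces a hand-counted estimate (NUM_BYTES_OBJECT_REF + Long.BYTES + value.length()) with a shallow instance size computed once per class, plus whatever the entity itself reports. A rough, self-contained illustration of that accounting pattern follows; ExampleKey and its fields are invented for the sketch:

```java
import java.util.Collection;
import java.util.Collections;

import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.RamUsageEstimator;

final class ExampleKey implements Accountable {
    // Computed once per class: object header plus this class's field layout.
    private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ExampleKey.class);

    private final Accountable entity; // e.g. a cache entity that reports its own footprint
    private final long readerVersion;
    private final byte[] payload;     // stand-in for the serialized cache key bytes

    ExampleKey(Accountable entity, long readerVersion, byte[] payload) {
        this.entity = entity;
        this.readerVersion = readerVersion;
        this.payload = payload;
    }

    @Override
    public long ramBytesUsed() {
        // Shallow size of the key itself, plus the sizes of what it points to.
        return BASE_RAM_BYTES_USED + entity.ramBytesUsed() + payload.length;
    }

    @Override
    public Collection<Accountable> getChildResources() {
        return Collections.emptyList();
    }
}
```

Since the cache's weigher sums k.ramBytesUsed() + v.ramBytesUsed(), making both the key and the entity Accountable keeps the maximum-weight eviction honest without the removed Value wrapper.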
@@ -132,8 +133,10 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; +import java.util.function.Supplier; import java.util.stream.Collectors; import static java.util.Collections.emptyList; @@ -1110,7 +1113,7 @@ public class IndicesService extends AbstractLifecycleComponent if (shard == null) { return; } - indicesRequestCache.clear(new IndexShardCacheEntity(shard, null)); + indicesRequestCache.clear(new IndexShardCacheEntity(shard)); logger.trace("{} explicit cache clear", shard.shardId()); } @@ -1122,13 +1125,19 @@ */ public void loadIntoContext(ShardSearchRequest request, SearchContext context, QueryPhase queryPhase) throws Exception { assert canCache(request, context); - final IndexShardCacheEntity entity = new IndexShardCacheEntity(context.indexShard(), out -> { - queryPhase.execute(context); - context.queryResult().writeToNoId(out); - }); final DirectoryReader directoryReader = context.searcher().getDirectoryReader(); - final BytesReference bytesReference = indicesRequestCache.getOrCompute(entity, directoryReader, request.cacheKey()); - if (entity.loadedFromCache()) { + + boolean[] loadedFromCache = new boolean[] { true }; + BytesReference bytesReference = cacheShardLevelResult(context.indexShard(), directoryReader, request.cacheKey(), out -> { + queryPhase.execute(context); + try { + context.queryResult().writeToNoId(out); + } catch (IOException e) { + throw new AssertionError("Could not serialize response", e); + } + loadedFromCache[0] = false; + }); + if (loadedFromCache[0]) { // restore the cached query result into the context final QuerySearchResult result = context.queryResult(); StreamInput in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistry); @@ -1154,7 +1163,11 @@ public class IndicesService extends AbstractLifecycleComponent } BytesReference cacheKey = new BytesArray("fieldstats:" + field); BytesReference statsRef = cacheShardLevelResult(shard, searcher.getDirectoryReader(), cacheKey, out -> { - out.writeOptionalWriteable(fieldType.stats(searcher.reader())); + try { + out.writeOptionalWriteable(fieldType.stats(searcher.reader())); + } catch (IOException e) { + throw new IllegalStateException("Failed to write field stats output", e); + } }); try (StreamInput in = statsRef.streamInput()) { return in.readOptionalWriteable(FieldStats::readFrom); @@ -1173,17 +1186,33 @@ public class IndicesService extends AbstractLifecycleComponent * @param loader loads the data into the cache if needed * @return the contents of the cache or the result of calling the loader */ - private BytesReference cacheShardLevelResult(IndexShard shard, DirectoryReader reader, BytesReference cacheKey, Loader loader) + private BytesReference cacheShardLevelResult(IndexShard shard, DirectoryReader reader, BytesReference cacheKey, Consumer<StreamOutput> loader) throws Exception { - IndexShardCacheEntity cacheEntity = new IndexShardCacheEntity(shard, loader); - return indicesRequestCache.getOrCompute(cacheEntity, reader, cacheKey); + IndexShardCacheEntity cacheEntity = new IndexShardCacheEntity(shard); + Supplier<BytesReference> supplier = () -> { + /* BytesStreamOutput allows to pass the expected size but by default uses + * BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result ie.
+ * a date histogram with 3 buckets is ~100byte so 16k might be very wasteful + * since we don't shrink to the actual size once we are done serializing. + * By passing 512 as the expected size we will resize the byte array in the stream + * slowly until we hit the page size and don't waste too much memory for small query + * results.*/ + final int expectedSizeInBytes = 512; + try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) { + loader.accept(out); + // for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep + // the memory properly paged instead of having varied sized bytes + return out.bytes(); + } + }; + return indicesRequestCache.getOrCompute(cacheEntity, supplier, reader, cacheKey); } static final class IndexShardCacheEntity extends AbstractIndexShardCacheEntity { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IndexShardCacheEntity.class); private final IndexShard indexShard; - protected IndexShardCacheEntity(IndexShard indexShard, Loader loader) { - super(loader); + protected IndexShardCacheEntity(IndexShard indexShard) { this.indexShard = indexShard; } @@ -1201,6 +1230,13 @@ public class IndicesService extends AbstractLifecycleComponent public Object getCacheIdentity() { return indexShard; } + + @Override + public long ramBytesUsed() { + // No need to take the IndexShard into account since it is shared + // across many entities + return BASE_RAM_BYTES_USED; + } } @FunctionalInterface diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java b/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java index 9ad369e22d4..40f401ac6b4 100644 --- a/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java +++ b/core/src/main/java/org/elasticsearch/ingest/IngestMetadata.java @@ -116,7 +116,7 @@ public final class IngestMetadata implements MetaData.Custom { @Override public EnumSet context() { - return MetaData.API_AND_GATEWAY; + return MetaData.ALL_CONTEXTS; } @Override diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java index fa3c6aa861d..aec443280d0 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java @@ -19,6 +19,7 @@ package org.elasticsearch.monitor.os; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -31,7 +32,7 @@ import java.util.Arrays; import java.util.Objects; public class OsStats implements Writeable, ToXContent { - + public static final Version V_5_1_0 = Version.fromId(5010099); private final long timestamp; private final Cpu cpu; private final Mem mem; @@ -51,7 +52,11 @@ public class OsStats implements Writeable, ToXContent { this.cpu = new Cpu(in); this.mem = new Mem(in); this.swap = new Swap(in); - this.cgroup = in.readOptionalWriteable(Cgroup::new); + if (in.getVersion().onOrAfter(V_5_1_0)) { + this.cgroup = in.readOptionalWriteable(Cgroup::new); + } else { + this.cgroup = null; + } } @Override @@ -60,7 +65,9 @@ public class OsStats implements Writeable, ToXContent { cpu.writeTo(out); mem.writeTo(out); swap.writeTo(out); - out.writeOptionalWriteable(cgroup); + if (out.getVersion().onOrAfter(V_5_1_0)) { + out.writeOptionalWriteable(cgroup); + } } public long getTimestamp() { diff --git 
a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 8d76ae9cbc3..f5ad4ff8772 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -76,6 +76,9 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.UnicastZenPing; +import org.elasticsearch.discovery.zen.ZenPing; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.GatewayAllocator; @@ -319,7 +322,8 @@ public class Node implements Closeable { final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool); clusterService.add(scriptModule.getScriptService()); resourcesToClose.add(clusterService); - final TribeService tribeService = new TribeService(settings, clusterService, nodeEnvironment.nodeId(), classpathPlugins); + final TribeService tribeService = new TribeService(settings, clusterService, nodeEnvironment.nodeId(), + s -> newTribeClientNode(s, classpathPlugins)); resourcesToClose.add(tribeService); final IngestService ingestService = new IngestService(settings, threadPool, this.environment, scriptModule.getScriptService(), analysisModule.getAnalysisRegistry(), pluginsService.filterPlugins(IngestPlugin.class)); @@ -393,7 +397,10 @@ public class Node implements Closeable { b.bind(HttpServer.class).toProvider(Providers.of(null)); }; } - modules.add(new DiscoveryModule(this.settings, transportService, networkService, pluginsService.filterPlugins(DiscoveryPlugin.class))); + final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, transportService, networkService, + pluginsService.filterPlugins(DiscoveryPlugin.class)); + final ZenPing zenPing = newZenPing(settings, threadPool, transportService, discoveryModule.getHostsProvider()); + modules.add(discoveryModule); pluginsService.processModules(modules); modules.add(b -> { b.bind(IndicesQueriesRegistry.class).toInstance(searchModule.getQueryParserRegistry()); @@ -425,6 +432,7 @@ public class Node implements Closeable { b.bind(UpdateHelper.class).toInstance(new UpdateHelper(settings, scriptModule.getScriptService())); b.bind(MetaDataIndexUpgradeService.class).toInstance(new MetaDataIndexUpgradeService(settings, indicesModule.getMapperRegistry(), settingsModule.getIndexScopedSettings())); + b.bind(ZenPing.class).toInstance(zenPing); { RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); processRecoverySettings(settingsModule.getClusterSettings(), recoverySettings); @@ -881,4 +889,15 @@ public class Node implements Closeable { } return customNameResolvers; } + + /** Create a new ZenPing instance for use in zen discovery. */ + protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, + UnicastHostsProvider hostsProvider) { + return new UnicastZenPing(settings, threadPool, transportService, hostsProvider); + } + + /** Constructs an internal node used as a client into a cluster fronted by this tribe node. 
*/ + protected Node newTribeClientNode(Settings settings, Collection<Class<? extends Plugin>> classpathPlugins) { + return new Node(new Environment(settings), classpathPlugins); + } } diff --git a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java index 54cd34d6742..cf21f4cc830 100644 --- a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java @@ -19,6 +19,19 @@ package org.elasticsearch.plugins; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; + +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.SettingCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.node.internal.InternalSettingsPreparer; + import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; @@ -26,18 +39,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -import joptsimple.OptionSet; -import joptsimple.OptionSpec; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.SettingCommand; -import org.elasticsearch.cli.UserException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.cli.Terminal; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; - import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; /** @@ -67,7 +68,7 @@ final class RemovePluginCommand extends SettingCommand { final Path pluginDir = env.pluginsFile().resolve(pluginName); if (Files.exists(pluginDir) == false) { throw new UserException( - ExitCodes.USAGE, + ExitCodes.CONFIG, "plugin " + pluginName + " not found; run 'elasticsearch-plugin list' to get list of installed plugins"); } diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestActions.java b/core/src/main/java/org/elasticsearch/rest/action/RestActions.java index c5a3799a88d..d017dfaf874 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/RestActions.java +++ b/core/src/main/java/org/elasticsearch/rest/action/RestActions.java @@ -192,7 +192,6 @@ public class RestActions { queryBuilder.defaultField(request.param("df")); queryBuilder.analyzer(request.param("analyzer")); queryBuilder.analyzeWildcard(request.paramAsBoolean("analyze_wildcard", false)); - queryBuilder.lowercaseExpandedTerms(request.paramAsBoolean("lowercase_expanded_terms", true)); queryBuilder.lenient(request.paramAsBoolean("lenient", null)); String defaultOperator = request.param("default_operator"); if (defaultOperator != null) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestBuilderListener.java b/core/src/main/java/org/elasticsearch/rest/action/RestBuilderListener.java index cc93e72d80d..c460331afaa 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/RestBuilderListener.java +++ b/core/src/main/java/org/elasticsearch/rest/action/RestBuilderListener.java @@ -34,11 +34,22 @@ public abstract class RestBuilderListener<Response> extends RestResponseListener<Response> @Override public final RestResponse buildResponse(Response response) throws Exception { - return buildResponse(response, channel.newBuilder()); + try (XContentBuilder builder
= channel.newBuilder()) { + final RestResponse restResponse = buildResponse(response, builder); + assert assertBuilderClosed(builder); + return restResponse; + } } /** - * Builds a response to send back over the channel. + * Builds a response to send back over the channel. Implementors should ensure that they close the provided {@link XContentBuilder} + * using the {@link XContentBuilder#close()} method. */ public abstract RestResponse buildResponse(Response response, XContentBuilder builder) throws Exception; + + // pkg private method that we can override for testing + boolean assertBuilderClosed(XContentBuilder xContentBuilder) { + assert xContentBuilder.generator().isClosed() : "callers should ensure the XContentBuilder is closed themselves"; + return true; + } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java index afde577de1e..9882b5bea3d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java @@ -20,9 +20,11 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest; import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; @@ -68,20 +70,22 @@ public class RestUpgradeAction extends BaseRestHandler { } private RestChannelConsumer handleGet(final RestRequest request, NodeClient client) { - return channel -> client.admin().indices().prepareUpgradeStatus(Strings.splitStringByCommaToArray(request.param("index"))) - .execute(new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + UpgradeStatusRequest statusRequest = new UpgradeStatusRequest(Strings.splitStringByCommaToArray(request.param("index"))); + statusRequest.indicesOptions(IndicesOptions.fromRequest(request, statusRequest.indicesOptions())); + return channel -> client.admin().indices().upgradeStatus(statusRequest, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + response.toXContent(builder, request); + builder.endObject(); + return new BytesRestResponse(OK, builder); + } + }); } private RestChannelConsumer handlePost(final RestRequest request, NodeClient client) { UpgradeRequest upgradeReq = new UpgradeRequest(Strings.splitStringByCommaToArray(request.param("index"))); + upgradeReq.indicesOptions(IndicesOptions.fromRequest(request, upgradeReq.indicesOptions())); upgradeReq.upgradeOnlyAncientSegments(request.paramAsBoolean("only_ancient_segments", false)); return channel -> client.admin().indices().upgrade(upgradeReq, new RestBuilderListener(channel) { @Override diff --git 
a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index 87cbe976089..b632448192d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -85,7 +85,7 @@ public class RestNodesAction extends AbstractCatAction { clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); - + final boolean fullId = request.paramAsBoolean("full_id", false); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override public void processResponse(final ClusterStateResponse clusterStateResponse) { @@ -99,7 +99,8 @@ public class RestNodesAction extends AbstractCatAction { client.admin().cluster().nodesStats(nodesStatsRequest, new RestResponseListener(channel) { @Override public RestResponse buildResponse(NodesStatsResponse nodesStatsResponse) throws Exception { - return RestTable.buildResponse(buildTable(request, clusterStateResponse, nodesInfoResponse, nodesStatsResponse), channel); + return RestTable.buildResponse(buildTable(fullId, request, clusterStateResponse, nodesInfoResponse, + nodesStatsResponse), channel); } }); } @@ -129,7 +130,8 @@ public class RestNodesAction extends AbstractCatAction { table.addCell("ram.percent", "alias:rp,ramPercent;text-align:right;desc:used machine memory ratio"); table.addCell("ram.max", "default:false;alias:rm,ramMax;text-align:right;desc:total machine memory"); table.addCell("file_desc.current", "default:false;alias:fdc,fileDescriptorCurrent;text-align:right;desc:used file descriptors"); - table.addCell("file_desc.percent", "default:false;alias:fdp,fileDescriptorPercent;text-align:right;desc:used file descriptor ratio"); + table.addCell("file_desc.percent", + "default:false;alias:fdp,fileDescriptorPercent;text-align:right;desc:used file descriptor ratio"); table.addCell("file_desc.max", "default:false;alias:fdm,fileDescriptorMax;text-align:right;desc:max file descriptors"); table.addCell("cpu", "alias:cpu;text-align:right;desc:recent cpu usage"); @@ -137,7 +139,8 @@ public class RestNodesAction extends AbstractCatAction { table.addCell("load_5m", "alias:l;text-align:right;desc:5m load avg"); table.addCell("load_15m", "alias:l;text-align:right;desc:15m load avg"); table.addCell("uptime", "default:false;alias:u;text-align:right;desc:node uptime"); - table.addCell("node.role", "alias:r,role,nodeRole;desc:m:master eligible node, d:data node, i:ingest node, -:coordinating node only"); + table.addCell("node.role", + "alias:r,role,nodeRole;desc:m:master eligible node, d:data node, i:ingest node, -:coordinating node only"); table.addCell("master", "alias:m;desc:*:current master"); table.addCell("name", "alias:n;desc:node name"); @@ -150,9 +153,12 @@ public class RestNodesAction extends AbstractCatAction { table.addCell("query_cache.evictions", "alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); table.addCell("request_cache.memory_size", "alias:rcm,requestCacheMemory;default:false;text-align:right;desc:used request cache"); - table.addCell("request_cache.evictions", "alias:rce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions"); - 
table.addCell("request_cache.hit_count", "alias:rchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts"); - table.addCell("request_cache.miss_count", "alias:rcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts"); + table.addCell("request_cache.evictions", + "alias:rce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions"); + table.addCell("request_cache.hit_count", + "alias:rchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts"); + table.addCell("request_cache.miss_count", + "alias:rcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts"); table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes"); table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush"); @@ -165,16 +171,20 @@ public class RestNodesAction extends AbstractCatAction { table.addCell("get.missing_time", "alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets"); table.addCell("get.missing_total", "alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets"); - table.addCell("indexing.delete_current", "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions"); + table.addCell("indexing.delete_current", + "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions"); table.addCell("indexing.delete_time", "alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions"); table.addCell("indexing.delete_total", "alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops"); - table.addCell("indexing.index_current", "alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops"); + table.addCell("indexing.index_current", + "alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops"); table.addCell("indexing.index_time", "alias:iiti,indexingIndexTime;default:false;text-align:right;desc:time spent in indexing"); table.addCell("indexing.index_total", "alias:iito,indexingIndexTotal;default:false;text-align:right;desc:number of indexing ops"); - table.addCell("indexing.index_failed", "alias:iif,indexingIndexFailed;default:false;text-align:right;desc:number of failed indexing ops"); + table.addCell("indexing.index_failed", + "alias:iif,indexingIndexFailed;default:false;text-align:right;desc:number of failed indexing ops"); table.addCell("merges.current", "alias:mc,mergesCurrent;default:false;text-align:right;desc:number of current merges"); - table.addCell("merges.current_docs", "alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs"); + table.addCell("merges.current_docs", + "alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs"); table.addCell("merges.current_size", "alias:mcs,mergesCurrentSize;default:false;text-align:right;desc:size of current merges"); table.addCell("merges.total", "alias:mt,mergesTotal;default:false;text-align:right;desc:number of completed merge ops"); table.addCell("merges.total_docs", "alias:mtd,mergesTotalDocs;default:false;text-align:right;desc:docs merged"); @@ -185,7 +195,8 @@ public class RestNodesAction extends AbstractCatAction { table.addCell("refresh.time", 
"alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes"); table.addCell("script.compilations", "alias:scrcc,scriptCompilations;default:false;text-align:right;desc:script compilations"); - table.addCell("script.cache_evictions", "alias:scrce,scriptCacheEvictions;default:false;text-align:right;desc:script cache evictions"); + table.addCell("script.cache_evictions", + "alias:scrce,scriptCacheEvictions;default:false;text-align:right;desc:script cache evictions"); table.addCell("search.fetch_current", "alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops"); table.addCell("search.fetch_time", "alias:sfti,searchFetchTime;default:false;text-align:right;desc:time spent in fetch phase"); @@ -195,14 +206,19 @@ public class RestNodesAction extends AbstractCatAction { table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"); - table.addCell("search.scroll_time", "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open"); + table.addCell("search.scroll_time", + "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open"); table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"); table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); - table.addCell("segments.index_writer_memory", "alias:siwm,segmentsIndexWriterMemory;default:false;text-align:right;desc:memory used by index writer"); - table.addCell("segments.version_map_memory", "alias:svmm,segmentsVersionMapMemory;default:false;text-align:right;desc:memory used by version map"); - table.addCell("segments.fixed_bitset_memory", "alias:sfbm,fixedBitsetMemory;default:false;text-align:right;desc:memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields"); + table.addCell("segments.index_writer_memory", + "alias:siwm,segmentsIndexWriterMemory;default:false;text-align:right;desc:memory used by index writer"); + table.addCell("segments.version_map_memory", + "alias:svmm,segmentsVersionMapMemory;default:false;text-align:right;desc:memory used by version map"); + table.addCell("segments.fixed_bitset_memory", + "alias:sfbm,fixedBitsetMemory;default:false;text-align:right;desc:memory used by fixed bit sets for nested object field types" + + " and type filters for types referred in _parent fields"); table.addCell("suggest.current", "alias:suc,suggestCurrent;default:false;text-align:right;desc:number of current suggest ops"); table.addCell("suggest.time", "alias:suti,suggestTime;default:false;text-align:right;desc:time spend in suggest"); @@ -212,8 +228,8 @@ public class RestNodesAction extends AbstractCatAction { return table; } - private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats) { - boolean fullId = req.paramAsBoolean("full_id", false); + private Table buildTable(boolean fullId, RestRequest req, ClusterStateResponse state, 
NodesInfoResponse nodesInfo, + NodesStatsResponse nodesStats) { DiscoveryNodes nodes = state.getState().nodes(); String masterId = nodes.getMasterNodeId(); @@ -255,14 +271,18 @@ public class RestNodesAction extends AbstractCatAction { table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getUsedPercent()); table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getTotal()); table.addCell(processStats == null ? null : processStats.getOpenFileDescriptors()); - table.addCell(processStats == null ? null : calculatePercentage(processStats.getOpenFileDescriptors(), processStats.getMaxFileDescriptors())); + table.addCell(processStats == null ? null : calculatePercentage(processStats.getOpenFileDescriptors(), + processStats.getMaxFileDescriptors())); table.addCell(processStats == null ? null : processStats.getMaxFileDescriptors()); table.addCell(osStats == null ? null : Short.toString(osStats.getCpu().getPercent())); boolean hasLoadAverage = osStats != null && osStats.getCpu().getLoadAverage() != null; - table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[0] == -1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[0])); - table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[1] == -1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[1])); - table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[2] == -1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[2])); + table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[0] == -1 ? null : + String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[0])); + table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[1] == -1 ? null : + String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[1])); + table.addCell(!hasLoadAverage || osStats.getCpu().getLoadAverage()[2] == -1 ? null : + String.format(Locale.ROOT, "%.2f", osStats.getCpu().getLoadAverage()[2])); table.addCell(jvmStats == null ? 
null : jvmStats.getUptime()); final String roles; diff --git a/core/src/main/java/org/elasticsearch/script/Script.java b/core/src/main/java/org/elasticsearch/script/Script.java index da2f2ed9698..e33da6d752a 100644 --- a/core/src/main/java/org/elasticsearch/script/Script.java +++ b/core/src/main/java/org/elasticsearch/script/Script.java @@ -109,7 +109,7 @@ public final class Script implements ToXContent, Writeable { boolean hasType = type != null; out.writeBoolean(hasType); if (hasType) { - ScriptType.writeTo(type, out); + type.writeTo(out); } out.writeOptionalString(lang); out.writeMap(params); diff --git a/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java index 979bffb4bcc..84855da2f94 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -131,7 +131,7 @@ public final class ScriptMetaData implements MetaData.Custom { @Override public EnumSet context() { - return MetaData.API_AND_GATEWAY; + return MetaData.ALL_CONTEXTS; } @Override diff --git a/core/src/main/java/org/elasticsearch/script/ScriptModes.java b/core/src/main/java/org/elasticsearch/script/ScriptModes.java index 4f9651b290a..15393948d66 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptModes.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptModes.java @@ -72,7 +72,7 @@ public class ScriptModes { } static String sourceKey(ScriptType scriptType) { - return SCRIPT_SETTINGS_PREFIX + "." + scriptType.getScriptType(); + return SCRIPT_SETTINGS_PREFIX + "." + scriptType.getName(); } static String getGlobalKey(String lang, ScriptType scriptType) { diff --git a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java index 07e0deb5b71..27a6ad04a70 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java @@ -50,7 +50,7 @@ public class ScriptSettings { for (ScriptType scriptType : ScriptType.values()) { scriptTypeSettingMap.put(scriptType, Setting.boolSetting( ScriptModes.sourceKey(scriptType), - scriptType.getDefaultScriptEnabled(), + scriptType.isDefaultEnabled(), Property.NodeScope)); } SCRIPT_TYPE_SETTING_MAP = Collections.unmodifiableMap(scriptTypeSettingMap); @@ -102,7 +102,7 @@ public class ScriptSettings { boolean defaultLangAndType = defaultNonFileScriptMode; // Files are treated differently because they are never default-deny if (ScriptType.FILE == scriptType) { - defaultLangAndType = ScriptType.FILE.getDefaultScriptEnabled(); + defaultLangAndType = ScriptType.FILE.isDefaultEnabled(); } final boolean defaultIfNothingSet = defaultLangAndType; diff --git a/core/src/main/java/org/elasticsearch/script/ScriptType.java b/core/src/main/java/org/elasticsearch/script/ScriptType.java index 77865daa372..01592b57aad 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptType.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptType.java @@ -22,68 +22,118 @@ package org.elasticsearch.script; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; -import java.util.Locale; /** - * The type of a script, more specifically where it gets loaded from: - * - provided dynamically at request time - * - 
loaded from an index - * - loaded from file + * ScriptType represents the way a script is stored and retrieved from the {@link ScriptService}. + * It's also used to by {@link ScriptSettings} and {@link ScriptModes} to determine whether or not + * a {@link Script} is allowed to be executed based on both default and user-defined settings. */ -public enum ScriptType { +public enum ScriptType implements Writeable { - INLINE(0, "inline", "inline", false), - STORED(1, "id", "stored", false), - FILE(2, "file", "file", true); + /** + * INLINE scripts are specified in numerous queries and compiled on-the-fly. + * They will be cached based on the lang and code of the script. + * They are turned off by default because most languages are insecure + * (Groovy and others), but can be overriden by the specific {@link ScriptEngineService} + * if the language is naturally secure (Painless, Mustache, and Expressions). + */ + INLINE ( 0 , new ParseField("inline") , false ), - private final int val; - private final ParseField parseField; - private final String scriptType; - private final boolean defaultScriptEnabled; + /** + * STORED scripts are saved as part of the {@link org.elasticsearch.cluster.ClusterState} + * based on user requests. They will be cached when they are first used in a query. + * They are turned off by default because most languages are insecure + * (Groovy and others), but can be overriden by the specific {@link ScriptEngineService} + * if the language is naturally secure (Painless, Mustache, and Expressions). + */ + STORED ( 1 , new ParseField("stored", "id") , false ), + /** + * FILE scripts are loaded from disk either on start-up or on-the-fly depending on + * user-defined settings. They will be compiled and cached as soon as they are loaded + * from disk. They are turned on by default as they should always be safe to execute. + */ + FILE ( 2 , new ParseField("file") , true ); + + /** + * Reads an int from the input stream and converts it to a {@link ScriptType}. + * @return The ScriptType read from the stream. Throws an {@link IllegalStateException} + * if no ScriptType is found based on the id. 
+ */ public static ScriptType readFrom(StreamInput in) throws IOException { - int scriptTypeVal = in.readVInt(); - for (ScriptType type : values()) { - if (type.val == scriptTypeVal) { - return type; - } - } - throw new IllegalArgumentException("Unexpected value read for ScriptType got [" + scriptTypeVal + "] expected one of [" - + INLINE.val + "," + FILE.val + "," + STORED.val + "]"); - } + int id = in.readVInt(); - public static void writeTo(ScriptType scriptType, StreamOutput out) throws IOException{ - if (scriptType != null) { - out.writeVInt(scriptType.val); + if (FILE.id == id) { + return FILE; + } else if (STORED.id == id) { + return STORED; + } else if (INLINE.id == id) { + return INLINE; } else { - out.writeVInt(INLINE.val); //Default to inline + throw new IllegalStateException("Error reading ScriptType id [" + id + "] from stream, expected one of [" + + FILE.id + " [" + FILE.parseField.getPreferredName() + "], " + + STORED.id + " [" + STORED.parseField.getPreferredName() + "], " + + INLINE.id + " [" + INLINE.parseField.getPreferredName() + "]]"); } } - ScriptType(int val, String name, String scriptType, boolean defaultScriptEnabled) { - this.val = val; - this.parseField = new ParseField(name); - this.scriptType = scriptType; - this.defaultScriptEnabled = defaultScriptEnabled; + private final int id; + private final ParseField parseField; + private final boolean defaultEnabled; + + /** + * Standard constructor. + * @param id A unique identifier for a type that can be read/written to a stream. + * @param parseField Specifies the name used to parse input from queries. + * @param defaultEnabled Whether or not a {@link ScriptType} can be run by default. + */ + ScriptType(int id, ParseField parseField, boolean defaultEnabled) { + this.id = id; + this.parseField = parseField; + this.defaultEnabled = defaultEnabled; } + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(id); + } + + /** + * @return The unique id for this {@link ScriptType}. + */ + public int getId() { + return id; + } + + /** + * @return The unique name for this {@link ScriptType} based on the {@link ParseField}. + */ + public String getName() { + return parseField.getPreferredName(); + } + + /** + * @return Specifies the name used to parse input from queries. + */ public ParseField getParseField() { return parseField; } - public boolean getDefaultScriptEnabled() { - return defaultScriptEnabled; - } - - public String getScriptType() { - return scriptType; + /** + * @return Whether or not a {@link ScriptType} can be run by default. Note + * this can be potentially overriden by any {@link ScriptEngineService}. + */ + public boolean isDefaultEnabled() { + return defaultEnabled; } + /** + * @return The same as calling {@link #getName()}. 
+ */ @Override public String toString() { - return name().toLowerCase(Locale.ROOT); + return getName(); } - } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java index 48be5365bb1..78d19280ce2 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java @@ -72,7 +72,9 @@ public class FilterAggregationBuilder extends AbstractAggregationBuilder<FilterAggregationBuilder> protected AggregatorFactory<?> doBuild(AggregationContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder) throws IOException { - return new FilterAggregatorFactory(name, type, filter, context, parent, subFactoriesBuilder, metaData); + // TODO: this sucks; we need a rewrite phase for aggregations too + final QueryBuilder rewrittenFilter = QueryBuilder.rewriteQuery(filter, context.searchContext().getQueryShardContext()); + return new FilterAggregatorFactory(name, type, rewrittenFilter, context, parent, subFactoriesBuilder, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java index cd18386da1e..cdd1f8d19a7 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java @@ -197,13 +197,13 @@ public abstract class InternalSignificantTerms BucketSignificancePriorityQueue<B> ordered = new BucketSignificancePriorityQueue<>(size); for (Map.Entry<String, List<B>> entry : buckets.entrySet()) { List<B> sameTermBuckets = entry.getValue(); final B b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext); - b.updateScore(getSignificanceHeuristic()); + b.updateScore(heuristic); if ((b.score > 0) && (b.subsetDf >= minDocCount)) { ordered.insertWithOverflow(b); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java index 1cf422ae50a..5af538965d1 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java @@ -217,9 +217,9 @@ public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationB @Override protected ValuesSourceAggregatorFactory<ValuesSource, ?> innerBuild(AggregationContext context, ValuesSourceConfig<ValuesSource> config, AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException { - this.significanceHeuristic.initialize(context.searchContext()); + SignificanceHeuristic executionHeuristic = this.significanceHeuristic.rewrite(context.searchContext()); return new SignificantTermsAggregatorFactory(name, type, config, includeExclude, executionHint, filterBuilder, - bucketCountThresholds, significanceHeuristic, context, parent, subFactoriesBuilder, metaData); + bucketCountThresholds, executionHeuristic, context, parent, subFactoriesBuilder, metaData); } @Override diff --git
a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java index c854b036b00..748adb67ce5 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java @@ -24,7 +24,6 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryShardException; @@ -43,18 +42,41 @@ import java.util.Objects; public class ScriptHeuristic extends SignificanceHeuristic { public static final String NAME = "script_heuristic"; - private final LongAccessor subsetSizeHolder; - private final LongAccessor supersetSizeHolder; - private final LongAccessor subsetDfHolder; - private final LongAccessor supersetDfHolder; private final Script script; - ExecutableScript executableScript = null; + + // This class holds an executable form of the script with private variables ready for execution + // on a single search thread. + static class ExecutableScriptHeuristic extends ScriptHeuristic { + private final LongAccessor subsetSizeHolder; + private final LongAccessor supersetSizeHolder; + private final LongAccessor subsetDfHolder; + private final LongAccessor supersetDfHolder; + private final ExecutableScript executableScript; + + ExecutableScriptHeuristic(Script script, ExecutableScript executableScript){ + super(script); + subsetSizeHolder = new LongAccessor(); + supersetSizeHolder = new LongAccessor(); + subsetDfHolder = new LongAccessor(); + supersetDfHolder = new LongAccessor(); + this.executableScript = executableScript; + executableScript.setNextVar("_subset_freq", subsetDfHolder); + executableScript.setNextVar("_subset_size", subsetSizeHolder); + executableScript.setNextVar("_superset_freq", supersetDfHolder); + executableScript.setNextVar("_superset_size", supersetSizeHolder); + } + + @Override + public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) { + subsetSizeHolder.value = subsetSize; + supersetSizeHolder.value = supersetSize; + subsetDfHolder.value = subsetFreq; + supersetDfHolder.value = supersetFreq; + return ((Number) executableScript.run()).doubleValue(); + } + } public ScriptHeuristic(Script script) { - subsetSizeHolder = new LongAccessor(); - supersetSizeHolder = new LongAccessor(); - subsetDfHolder = new LongAccessor(); - supersetDfHolder = new LongAccessor(); this.script = script; } @@ -71,22 +93,15 @@ public class ScriptHeuristic extends SignificanceHeuristic { } @Override - public void initialize(InternalAggregation.ReduceContext context) { - initialize(context.scriptService().executable(script, ScriptContext.Standard.AGGS, Collections.emptyMap())); + public SignificanceHeuristic rewrite(InternalAggregation.ReduceContext context) { + return new ExecutableScriptHeuristic(script, context.scriptService().executable(script, ScriptContext.Standard.AGGS, Collections.emptyMap())); } @Override - public void initialize(SearchContext context) { - 
initialize(context.getQueryShardContext().getExecutableScript(script, ScriptContext.Standard.AGGS, Collections.emptyMap())); + public SignificanceHeuristic rewrite(SearchContext context) { + return new ExecutableScriptHeuristic(script, context.getQueryShardContext().getExecutableScript(script, ScriptContext.Standard.AGGS, Collections.emptyMap())); } - public void initialize(ExecutableScript executableScript) { - executableScript.setNextVar("_subset_freq", subsetDfHolder); - executableScript.setNextVar("_subset_size", subsetSizeHolder); - executableScript.setNextVar("_superset_freq", supersetDfHolder); - executableScript.setNextVar("_superset_size", supersetSizeHolder); - this.executableScript = executableScript; - } /** * Calculates score with a script @@ -99,19 +114,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { */ @Override public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) { - if (executableScript == null) { - //In tests, wehn calling assertSearchResponse(..) the response is streamed one additional time with an arbitrary version, see assertVersionSerializable(..). - // Now, for version before 1.5.0 the score is computed after streaming the response but for scripts the script does not exists yet. - // assertSearchResponse() might therefore fail although there is no problem. - // This should be replaced by an exception in 2.0. - ESLoggerFactory.getLogger("script heuristic").warn("cannot compute score - script has not been initialized yet."); - return 0; - } - subsetSizeHolder.value = subsetSize; - supersetSizeHolder.value = supersetSize; - subsetDfHolder.value = subsetFreq; - supersetDfHolder.value = supersetFreq; - return ((Number) executableScript.run()).doubleValue(); + throw new UnsupportedOperationException("This scoring heuristic must have 'rewrite' called on it to provide a version ready for use"); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java index db9711c1a8d..7b6cf699741 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java @@ -50,11 +50,23 @@ public abstract class SignificanceHeuristic implements NamedWriteable, ToXConten } } - public void initialize(InternalAggregation.ReduceContext reduceContext) { - + /** + * Provides a hook for subclasses to provide a version of the heuristic + * prepared for execution on data on the coordinating node. + * @param reduceContext the reduce context on the coordinating node + * @return a version of this heuristic suitable for execution + */ + public SignificanceHeuristic rewrite(InternalAggregation.ReduceContext reduceContext) { + return this; } - public void initialize(SearchContext context) { - + /** + * Provides a hook for subclasses to provide a version of the heuristic + * prepared for execution on data on a shard. 
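For heuristics without per-request state the default implementations above simply return this; only script-backed heuristics such as ScriptHeuristic hand back a prepared copy. A sketch of the intended call pattern on a shard (assuming the classes in this diff; how the SearchContext is obtained is elided here):

import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic;
import org.elasticsearch.search.internal.SearchContext;

class HeuristicRewriteSketch {
    static double scoreOneTerm(SignificanceHeuristic heuristic, SearchContext context,
                               long subsetDf, long subsetSize, long supersetDf, long supersetSize) {
        // rewrite() returns a copy bound to an ExecutableScript confined to this
        // search thread when the heuristic is script-backed; stateless heuristics
        // return themselves, so calling it unconditionally is always safe.
        SignificanceHeuristic prepared = heuristic.rewrite(context);
        return prepared.getScore(subsetDf, subsetSize, supersetDf, supersetSize);
    }
}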
+ * @param context the search context on the data node + * @return a version of this heuristic suitable for execution + */ + public SignificanceHeuristic rewrite(SearchContext context) { + return this; } } diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index ab38b120c86..9e4e0262080 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -20,6 +20,7 @@ package org.elasticsearch.transport; import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; + import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.IOUtils; @@ -53,8 +54,8 @@ import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.transport.PortsRange; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -90,7 +91,6 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -366,6 +366,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i return Arrays.asList(recovery, bulk, reg, state, ping); } + @Override public synchronized void close() throws IOException { closeChannels(allChannels); } diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index 7871a0a6f39..69ad77fc91e 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -185,7 +185,7 @@ public class TribeService extends AbstractLifecycleComponent { private final List<Node> nodes = new CopyOnWriteArrayList<>(); public TribeService(Settings settings, ClusterService clusterService, final String tribeNodeId, - Collection<Class<? extends Plugin>> classpathPlugins) { + Function<Settings, Node> clientNodeBuilder) { super(settings); this.clusterService = clusterService; Map<String, Settings> nodesSettings = new HashMap<>(settings.getGroups("tribe", true)); @@ -193,7 +193,7 @@ public class TribeService extends AbstractLifecycleComponent { nodesSettings.remove("on_conflict"); // remove prefix settings that don't indicate a client for (Map.Entry<String, Settings> entry : nodesSettings.entrySet()) { Settings clientSettings = buildClientSettings(entry.getKey(), tribeNodeId, settings, entry.getValue()); - nodes.add(new TribeClientNode(clientSettings, classpathPlugins)); + nodes.add(clientNodeBuilder.apply(clientSettings)); } this.blockIndicesMetadata = BLOCKS_METADATA_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY); diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index cbd1f93491b..999f036d9f4 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++
b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.1.jar}" { //// Very special jar permissions: //// These are dangerous permissions that we don't want to grant to everything. -grant codeBase "${codebase.lucene-core-6.2.0.jar}" { +grant codeBase "${codebase.lucene-core-6.3.0-snapshot-a66a445.jar}" { // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die) // java 8 package permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; @@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-core-6.2.0.jar}" { permission java.lang.RuntimePermission "accessDeclaredMembers"; }; -grant codeBase "${codebase.lucene-misc-6.2.0.jar}" { +grant codeBase "${codebase.lucene-misc-6.3.0-snapshot-a66a445.jar}" { // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper permission java.nio.file.LinkPermission "hard"; }; diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index 43f6b62c3c3..1c780f96933 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -33,7 +33,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; -grant codeBase "${codebase.lucene-test-framework-6.2.0.jar}" { +grant codeBase "${codebase.lucene-test-framework-6.3.0-snapshot-a66a445.jar}" { // needed by RamUsageTester permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // needed for testing hardlinks in StoreRecoveryTests since we install MockFS @@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-test-framework-6.2.0.jar}" { permission java.lang.RuntimePermission "accessDeclaredMembers"; }; -grant codeBase "${codebase.randomizedtesting-runner-2.3.2.jar}" { +grant codeBase "${codebase.randomizedtesting-runner-2.4.0.jar}" { // optionally needed for access to private test methods (e.g. 
beforeClass) permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // needed to fail tests on uncaught exceptions from other threads diff --git a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index ae2f246639d..8a2e965a7b4 100644 --- a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -51,7 +51,6 @@ import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.index.AlreadyExpiredException; import org.elasticsearch.index.Index; -import org.elasticsearch.index.engine.IndexFailedEngineException; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.shard.IllegalIndexShardStateException; @@ -400,21 +399,6 @@ public class ExceptionSerializationTests extends ESTestCase { assertEquals("TIMESTAMP", ex.timestamp()); } - public void testIndexFailedEngineException() throws IOException { - ShardId id = new ShardId("foo", "_na_", 1); - IndexFailedEngineException ex = serialize(new IndexFailedEngineException(id, "type", "id", null)); - assertEquals(ex.getShardId(), new ShardId("foo", "_na_", 1)); - assertEquals("type", ex.type()); - assertEquals("id", ex.id()); - assertNull(ex.getCause()); - - ex = serialize(new IndexFailedEngineException(null, "type", "id", new NullPointerException())); - assertNull(ex.getShardId()); - assertEquals("type", ex.type()); - assertEquals("id", ex.id()); - assertTrue(ex.getCause() instanceof NullPointerException); - } - public void testAliasesMissingException() throws IOException { AliasesNotFoundException ex = serialize(new AliasesNotFoundException("one", "two", "three")); assertEquals("aliases [one, two, three] missing", ex.getMessage()); @@ -680,7 +664,7 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(25, org.elasticsearch.script.GeneralScriptException.class); ids.put(26, org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class); ids.put(27, org.elasticsearch.snapshots.SnapshotCreationException.class); - ids.put(28, org.elasticsearch.index.engine.DeleteFailedEngineException.class); + ids.put(28, org.elasticsearch.index.engine.DeleteFailedEngineException.class); //deprecated in 6.0 ids.put(29, org.elasticsearch.index.engine.DocumentMissingException.class); ids.put(30, org.elasticsearch.snapshots.SnapshotException.class); ids.put(31, org.elasticsearch.indices.InvalidAliasNameException.class); @@ -732,7 +716,7 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(77, org.elasticsearch.common.util.concurrent.UncategorizedExecutionException.class); ids.put(78, org.elasticsearch.action.TimestampParsingException.class); ids.put(79, org.elasticsearch.action.RoutingMissingException.class); - ids.put(80, org.elasticsearch.index.engine.IndexFailedEngineException.class); + ids.put(80, org.elasticsearch.index.engine.IndexFailedEngineException.class); //deprecated in 6.0 ids.put(81, org.elasticsearch.index.snapshots.IndexShardRestoreFailedException.class); ids.put(82, org.elasticsearch.repositories.RepositoryException.class); ids.put(83, org.elasticsearch.transport.ReceiveTimeoutTransportException.class); diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index 
cc2f000fbae..167a36a96d1 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -23,6 +23,9 @@ import org.elasticsearch.action.ShardValidateQueryRequestTests; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryStringQueryBuilder; +import org.elasticsearch.monitor.os.OsStats; +import org.elasticsearch.index.query.SimpleQueryStringBuilder; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; @@ -271,8 +274,10 @@ public class VersionTests extends ESTestCase { assertUnknownVersion(V_20_0_0_UNRELEASED); expectThrows(AssertionError.class, () -> assertUnknownVersion(Version.CURRENT)); assertUnknownVersion(AliasFilter.V_5_1_0); // once we released 5.1.0 and it's added to Version.java we need to remove this constant + assertUnknownVersion(OsStats.V_5_1_0); // once we released 5.1.0 and it's added to Version.java we need to remove this constant + assertUnknownVersion(SimpleQueryStringBuilder.V_5_1_0_UNRELEASED); + assertUnknownVersion(QueryStringQueryBuilder.V_5_1_0_UNRELEASED); // once we released 5.0.0 and it's added to Version.java we need to remove this constant - assertUnknownVersion(ShardValidateQueryRequestTests.V_5_0_0); } public static void assertUnknownVersion(Version version) { diff --git a/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java b/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java index ad2cabefdbc..1bc895095ba 100644 --- a/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/ExplainRequestTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.action; +import org.elasticsearch.Version; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -92,7 +93,7 @@ public class ExplainRequestTests extends ESTestCase { .decode("AAABBWluZGV4BHR5cGUCaWQBDHNvbWVfcm91dGluZwEOdGhlX3ByZWZlcmVuY2UEdGVybT" + "+AAAAABWZpZWxkFQV2YWx1ZQIGYWxpYXMwBmFsaWFzMQECBmZpZWxkMQZmaWVsZDIBAQEIZmllbGQxLioBCGZpZWxkMi4qAA")); try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { - in.setVersion(ShardValidateQueryRequestTests.V_5_0_0); + in.setVersion(Version.V_5_0_0); ExplainRequest readRequest = new ExplainRequest(); readRequest.readFrom(in); assertEquals(0, in.available()); @@ -104,7 +105,7 @@ public class ExplainRequestTests extends ESTestCase { assertEquals(request.routing(), readRequest.routing()); assertEquals(request.fetchSourceContext(), readRequest.fetchSourceContext()); BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(ShardValidateQueryRequestTests.V_5_0_0); + output.setVersion(Version.V_5_0_0); readRequest.writeTo(output); assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); } diff --git a/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java b/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java index 34c6999f4e8..c1d18146a08 100644 --- a/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/ShardValidateQueryRequestTests.java @@ -42,7 +42,6 @@ import 
java.util.Collections; import java.util.List; public class ShardValidateQueryRequestTests extends ESTestCase { - public static final Version V_5_0_0 = Version.fromId(5000099); protected NamedWriteableRegistry namedWriteableRegistry; protected SearchRequestParsers searchRequestParsers; @@ -94,7 +93,7 @@ public class ShardValidateQueryRequestTests extends ESTestCase { // this is a base64 encoded request generated with the same input .decode("AAVpbmRleAZmb29iYXIBAQdpbmRpY2VzBAR0ZXJtP4AAAAAFZmllbGQVBXZhbHVlAgV0eXBlMQV0eXBlMgIGYWxpYXMwBmFsaWFzMQABAA")); try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { - in.setVersion(V_5_0_0); + in.setVersion(Version.V_5_0_0); ShardValidateQueryRequest readRequest = new ShardValidateQueryRequest(); readRequest.readFrom(in); assertEquals(0, in.available()); @@ -106,7 +105,7 @@ public class ShardValidateQueryRequestTests extends ESTestCase { assertEquals(request.rewrite(), readRequest.rewrite()); assertEquals(request.shardId(), readRequest.shardId()); BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(V_5_0_0); + output.setVersion(Version.V_5_0_0); readRequest.writeTo(output); assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index 97c1a20c33f..23fdf3499b2 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.Requests; @@ -31,7 +30,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; -import java.util.List; import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -47,14 +45,11 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase { @TestLogging("_root:DEBUG") public void testDelayShards() throws Exception { logger.info("--> starting 3 nodes"); - List nodes = internalCluster().startNodesAsync(3).get(); + internalCluster().startNodesAsync(3).get(); // Wait for all 3 nodes to be up logger.info("--> waiting for 3 nodes to be up"); - assertBusy(() -> { - NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().get(); - assertThat(resp.getNodes().size(), equalTo(3)); - }); + ensureStableCluster(3); logger.info("--> creating 'test' index"); assertAcked(prepareCreate("test").setSettings(Settings.builder() @@ -66,7 +61,8 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase { logger.info("--> stopping a random node"); assertTrue(internalCluster().stopRandomDataNode()); - ensureYellow("test"); + logger.info("--> waiting for the master to remove the stopped node from the cluster state"); + ensureStableCluster(2); ClusterAllocationExplainResponse resp = client().admin().cluster().prepareAllocationExplain().useAnyUnassignedShard().get(); 
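// The two ensureStableCluster(n) calls above come from ESIntegTestCase and replace
// the removed assertBusy() polling with a single blocking health request. A sketch
// of the underlying idea (assumed shape, not the exact implementation; the timeout
// value is illustrative):
//
//   ClusterHealthResponse health = client().admin().cluster().prepareHealth()
//       .setWaitForNodes(Integer.toString(n))   // e.g. "3"; also accepts ">=3"
//       .setTimeout(TimeValue.timeValueSeconds(30))
//       .get();
//   assertFalse("cluster failed to stabilize", health.isTimedOut());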
ClusterAllocationExplanation cae = resp.getExplanation(); diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index be2a83af42c..6d0a0824490 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks; import com.carrotsearch.randomizedtesting.RandomizedContext; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; @@ -380,9 +380,9 @@ public class CancellableTasksTests extends TaskManagerTestCase { // Introduce an additional pseudo random repeatable race conditions String delayName = RandomizedContext.current().getRunnerSeedAsString() + ":" + nodeId + ":" + name; Random random = new Random(delayName.hashCode()); - if (RandomInts.randomIntBetween(random, 0, 10) < 1) { + if (RandomNumbers.randomIntBetween(random, 0, 10) < 1) { try { - Thread.sleep(RandomInts.randomIntBetween(random, 20, 50)); + Thread.sleep(RandomNumbers.randomIntBetween(random, 20, 50)); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index d5bc16207f5..af5909005ae 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -130,7 +130,7 @@ public class RolloverIT extends ESIntegTestCase { final RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias") .addMaxIndexAgeCondition(TimeValue.timeValueHours(4)).get(); assertThat(response.getOldIndex(), equalTo("test_index-0")); - assertThat(response.getNewIndex(), equalTo("test_index-0")); + assertThat(response.getNewIndex(), equalTo("test_index-000001")); assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(false)); assertThat(response.getConditionStatus().size(), equalTo(1)); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index cfbeb79ef7d..260f70e19ed 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.flush.TransportFlushAction; -import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import 
org.elasticsearch.action.support.broadcast.BroadcastResponse; @@ -101,7 +100,7 @@ public class BroadcastReplicationTests extends ESTestCase { transportService.start(); transportService.acceptIncomingRequests(); broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, threadPool, clusterService, transportService, - new ActionFilters(new HashSet()), new IndexNameExpressionResolver(Settings.EMPTY), null); + new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), null); } @After @@ -120,7 +119,7 @@ public class BroadcastReplicationTests extends ESTestCase { final String index = "test"; setState(clusterService, state(index, randomBoolean(), randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED)); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); Future response = (broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index))); for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { if (randomBoolean()) { @@ -139,11 +138,11 @@ public class BroadcastReplicationTests extends ESTestCase { final String index = "test"; setState(clusterService, state(index, randomBoolean(), ShardRoutingState.STARTED)); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); Future response = (broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index))); for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { ReplicationResponse replicationResponse = new ReplicationResponse(); - replicationResponse.setShardInfo(new ReplicationResponse.ShardInfo(1, 1, new ReplicationResponse.ShardInfo.Failure[0])); + replicationResponse.setShardInfo(new ReplicationResponse.ShardInfo(1, 1)); shardRequests.v2().onResponse(replicationResponse); } logger.info("total shards: {}, ", response.get().getTotalShards()); @@ -154,7 +153,7 @@ public class BroadcastReplicationTests extends ESTestCase { final String index = "test"; int numShards = 1 + randomInt(3); setState(clusterService, stateWithAssignedPrimariesAndOneReplica(index, numShards)); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); Future response = (broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index))); int succeeded = 0; int failed = 0; @@ -184,7 +183,7 @@ public class BroadcastReplicationTests extends ESTestCase { public void testNoShards() throws InterruptedException, ExecutionException, IOException { setState(clusterService, stateWithNoShard()); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); BroadcastResponse response = executeAndAssertImmediateResponse(broadcastReplicationAction, new DummyBroadcastRequest()); assertBroadcastResponse(0, 0, 0, response, null); } @@ -194,7 +193,7 @@ public class BroadcastReplicationTests extends ESTestCase { final ShardId shardId = new ShardId(index, "_na_", 0); ClusterState clusterState = state(index, randomBoolean(), randomBoolean() ? 
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); List shards = broadcastReplicationAction.shards(new DummyBroadcastRequest().indices(shardId.getIndexName()), clusterState); assertThat(shards.size(), equalTo(1)); assertThat(shards.get(0), equalTo(shardId)); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java index ab419a3c698..55485b590cf 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -144,6 +144,91 @@ public class ClusterStateCreationUtils { return state.build(); } + /** + * Creates cluster state with an index that has #(numberOfPrimaries) primary shards in the started state and no replicas. + * The cluster state contains #(numberOfNodes) nodes and assigns primaries to those nodes. + */ + public static ClusterState state(String index, final int numberOfNodes, final int numberOfPrimaries) { + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + Set nodes = new HashSet<>(); + for (int i = 0; i < numberOfNodes; i++) { + final DiscoveryNode node = newNode(i); + discoBuilder = discoBuilder.add(node); + nodes.add(node.getId()); + } + discoBuilder.localNodeId(newNode(0).getId()); + discoBuilder.masterNodeId(randomFrom(nodes)); + IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaries).put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build(); + + RoutingTable.Builder routing = new RoutingTable.Builder(); + routing.addAsNew(indexMetaData); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(indexMetaData.getIndex()); + for (int i = 0; i < numberOfPrimaries; i++) { + ShardId shardId = new ShardId(indexMetaData.getIndex(), i); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRouting(shardId, randomFrom(nodes), true, ShardRoutingState.STARTED)); + indexRoutingTable.addIndexShard(indexShardRoutingBuilder.build()); + } + + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + state.nodes(discoBuilder); + state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded()); + state.routingTable(RoutingTable.builder().add(indexRoutingTable).build()); + return state.build(); + } + + + + /** + * Creates cluster state with the given indices, each index containing #(numberOfPrimaries) + * started primary shards and no replicas. The cluster state contains #(numberOfNodes) nodes + * and assigns primaries to those nodes. 
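Both helpers follow the same shape. A usage sketch for the single-index variant shown complete above, state(index, numberOfNodes, numberOfPrimaries) (the fixture class is invented for illustration; the counts are arbitrary):

import org.elasticsearch.cluster.ClusterState;

import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state;

class ClusterStateFixtures {
    static ClusterState threeNodesFivePrimaries() {
        // "test" index: 5 started primaries, 0 replicas, assigned across 3 nodes
        ClusterState clusterState = state("test", 3, 5);
        assert clusterState.nodes().getSize() == 3;
        assert clusterState.routingTable().index("test").shards().size() == 5;
        return clusterState;
    }
}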
+ */ + public static ClusterState state(final int numberOfNodes, final String[] indices, final int numberOfPrimaries) { + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + Set nodes = new HashSet<>(); + for (int i = 0; i < numberOfNodes; i++) { + final DiscoveryNode node = newNode(i); + discoBuilder = discoBuilder.add(node); + nodes.add(node.getId()); + } + discoBuilder.localNodeId(newNode(0).getId()); + discoBuilder.masterNodeId(newNode(0).getId()); + MetaData.Builder metaData = MetaData.builder(); + RoutingTable.Builder routingTable = RoutingTable.builder(); + for (String index : indices) { + IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaries).put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build(); + + RoutingTable.Builder routing = new RoutingTable.Builder(); + routing.addAsNew(indexMetaData); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(indexMetaData.getIndex()); + for (int i = 0; i < numberOfPrimaries; i++) { + ShardId shardId = new ShardId(indexMetaData.getIndex(), i); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRouting(shardId, randomFrom(nodes), true, ShardRoutingState.STARTED)); + indexRoutingTable.addIndexShard(indexShardRoutingBuilder.build()); + } + + metaData.put(indexMetaData, false); + routingTable.add(indexRoutingTable); + } + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + state.nodes(discoBuilder); + state.metaData(metaData.generateClusterUuidIfNeeded().build()); + state.routingTable(routingTable.build()); + return state.build(); + } + /** * Creates cluster state with several shards and one replica and all shards STARTED. 
*/ diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index b5edc1b53c5..a49c2ae978e 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -256,7 +256,7 @@ public class ReplicationOperationTests extends ESTestCase { final ClusterState initialState, final ClusterState changedState) throws Exception { AtomicReference state = new AtomicReference<>(initialState); - logger.debug("--> using initial state:\n{}", state.get().prettyPrint()); + logger.debug("--> using initial state:\n{}", state.get()); final long primaryTerm = initialState.getMetaData().index(shardId.getIndexName()).primaryTerm(shardId.id()); final ShardRouting primaryShard = state.get().routingTable().shardRoutingTable(shardId).primaryShard(); final TestPrimary primary = new TestPrimary(primaryShard, primaryTerm) { @@ -264,7 +264,7 @@ public class ReplicationOperationTests extends ESTestCase { public Result perform(Request request) throws Exception { Result result = super.perform(request); state.set(changedState); - logger.debug("--> state after primary operation:\n{}", state.get().prettyPrint()); + logger.debug("--> state after primary operation:\n{}", state.get()); return result; } }; @@ -303,8 +303,7 @@ public class ReplicationOperationTests extends ESTestCase { logger.debug("using active shard count of [{}], assigned shards [{}], total shards [{}]." + " expecting op to [{}]. using state: \n{}", request.waitForActiveShards(), 1 + assignedReplicas, 1 + assignedReplicas + unassignedReplicas, - passesActiveShardCheck ? "succeed" : "retry", - state.prettyPrint()); + passesActiveShardCheck ? "succeed" : "retry", state); final long primaryTerm = state.metaData().index(index).primaryTerm(shardId.id()); final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(index).shard(shardId.id()); PlainActionFuture listener = new PlainActionFuture<>(); diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeClientNode.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java similarity index 55% rename from core/src/main/java/org/elasticsearch/tribe/TribeClientNode.java rename to core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java index d9520aef768..3740f8dd5f7 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeClientNode.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java @@ -17,20 +17,23 @@ * under the License. */ -package org.elasticsearch.tribe; +package org.elasticsearch.action.support.replication; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.Node; -import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESTestCase; -import java.util.Collection; +import java.util.Locale; -/** - * An internal node that connects to a remove cluster, as part of a tribe node. 
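The removed TribeClientNode below previously threaded classpathPlugins through TribeService; with the Function-based constructor in the TribeService hunk above, a caller can instead capture the plugins once and hand over a factory. A sketch of such a factory (the TribeClientNodeFactory class is invented for illustration; it assumes Node's protected Node(Environment, Collection) constructor):

import java.util.Collection;
import java.util.function.Function;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;

class TribeClientNodeFactory {
    // Captures the plugin list once; TribeService then only sees Settings -> Node.
    static Function<Settings, Node> clientNodeBuilder(Collection<Class<? extends Plugin>> classpathPlugins) {
        return clientSettings -> new Node(new Environment(clientSettings), classpathPlugins) {
            // anonymous subclass, mirroring what the removed TribeClientNode did
        };
    }
}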
- */ -class TribeClientNode extends Node { - TribeClientNode(Settings settings, Collection> classpathPlugins) { - super(new Environment(settings), classpathPlugins); +import static org.hamcrest.CoreMatchers.equalTo; + +public class ReplicationResponseTests extends ESTestCase { + + public void testShardInfoToString() { + final int total = 5; + final int successful = randomIntBetween(1, total); + final ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(total, successful); + assertThat( + shardInfo.toString(), + equalTo(String.format(Locale.ROOT, "ShardInfo{total=5, successful=%d, failures=[]}", successful))); } + } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 3deae74f455..709c4b830ea 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -77,7 +77,7 @@ import org.junit.Before; import org.junit.BeforeClass; import java.io.IOException; -import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -227,7 +227,7 @@ public class TransportReplicationActionTests extends ESTestCase { randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED)); ReplicationTask task = maybeTask(); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); Request request = new Request(shardId).timeout("1ms"); PlainActionFuture listener = new PlainActionFuture<>(); @@ -246,7 +246,7 @@ public class TransportReplicationActionTests extends ESTestCase { assertTrue(request.isRetrySet.get()); setState(clusterService, state(index, true, ShardRoutingState.STARTED)); - logger.debug("--> primary assigned state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> primary assigned state:\n{}", clusterService.state()); final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); @@ -275,7 +275,7 @@ public class TransportReplicationActionTests extends ESTestCase { String relocationTargetNode = state.getRoutingTable().shardRoutingTable(shardId).primaryShard().relocatingNodeId(); state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(relocationTargetNode)).build(); setState(clusterService, state); - logger.debug("--> relocation ongoing state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> relocation ongoing state:\n{}", clusterService.state()); Request request = new Request(shardId).timeout("1ms").routedBasedOnClusterVersion(clusterService.state().version() + 1); PlainActionFuture listener = new PlainActionFuture<>(); @@ -295,10 +295,10 @@ public class TransportReplicationActionTests extends ESTestCase { ShardRouting relocationTarget = clusterService.state().getRoutingTable().shardRoutingTable(shardId) .shardsWithState(ShardRoutingState.INITIALIZING).get(0); AllocationService allocationService = ESAllocationTestCase.createAllocationService(); - ClusterState updatedState = allocationService.applyStartedShards(state, Arrays.asList(relocationTarget)); + ClusterState updatedState = 
allocationService.applyStartedShards(state, Collections.singletonList(relocationTarget)); setState(clusterService, updatedState); - logger.debug("--> relocation complete state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> relocation complete state:\n{}", clusterService.state()); IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); @@ -315,7 +315,7 @@ public class TransportReplicationActionTests extends ESTestCase { // no replicas in oder to skip the replication part setState(clusterService, state(index, true, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED)); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); Request request = new Request(new ShardId("unknown_index", "_na_", 0)).timeout("1ms"); PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); @@ -339,7 +339,7 @@ public class TransportReplicationActionTests extends ESTestCase { final ShardId shardId = new ShardId(index, "_na_", 0); // no replicas in order to skip the replication part setState(clusterService, stateWithActivePrimary(index, true, randomInt(3))); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); Request request = new Request(shardId); boolean timeout = randomBoolean(); if (timeout) { @@ -396,7 +396,7 @@ public class TransportReplicationActionTests extends ESTestCase { ReplicationTask task = maybeTask(); setState(clusterService, stateWithActivePrimary(index, randomBoolean(), 3)); - logger.debug("using state: \n{}", clusterService.state().prettyPrint()); + logger.debug("using state: \n{}", clusterService.state()); final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); @@ -545,7 +545,7 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); ClusterState state = stateWithActivePrimary(index, true, 1 + randomInt(3), randomInt(2)); - logger.info("using state: {}", state.prettyPrint()); + logger.info("using state: {}", state); setState(clusterService, state); // check that at unknown node fails @@ -651,7 +651,7 @@ public class TransportReplicationActionTests extends ESTestCase { // no replica, we only want to test on primary final ClusterState state = state(index, true, ShardRoutingState.STARTED); setState(clusterService, state); - logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + logger.debug("--> using initial state:\n{}", clusterService.state()); final ShardRouting primaryShard = state.routingTable().shardRoutingTable(shardId).primaryShard(); Request request = new Request(shardId); PlainActionFuture listener = new PlainActionFuture<>(); @@ -867,8 +867,11 @@ public class TransportReplicationActionTests extends ESTestCase { final CapturingTransport.CapturedRequest capturedRequest = capturedRequests.get(0); assertThat(capturedRequest.action, equalTo("testActionWithExceptions[r]")); assertThat(capturedRequest.request, instanceOf(TransportReplicationAction.ConcreteShardRequest.class)); - 
assertThat(((TransportReplicationAction.ConcreteShardRequest) capturedRequest.request).getRequest(), equalTo(request)); - assertThat(((TransportReplicationAction.ConcreteShardRequest) capturedRequest.request).getTargetAllocationID(), + final TransportReplicationAction.ConcreteShardRequest concreteShardRequest = + (TransportReplicationAction.ConcreteShardRequest) capturedRequest.request; + assertThat(concreteShardRequest.getRequest(), equalTo(request)); + assertThat(concreteShardRequest.getRequest().isRetrySet.get(), equalTo(true)); + assertThat(concreteShardRequest.getTargetAllocationID(), equalTo(replica.allocationId().getId())); } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 14afe4dee9b..571bbfa72e0 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -28,9 +28,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import org.junit.Before; import org.mockito.ArgumentCaptor; @@ -71,6 +71,7 @@ public class TransportWriteActionTests extends ESTestCase { CapturingActionListener listener = new CapturingActionListener<>(); responder.accept(result, listener); assertNotNull(listener.response); + assertNull(listener.failure); verify(indexShard, never()).refresh(any()); verify(indexShard, never()).addRefreshListener(any(), any()); } @@ -92,6 +93,7 @@ public class TransportWriteActionTests extends ESTestCase { CapturingActionListener listener = new CapturingActionListener<>(); responder.accept(result, listener); assertNotNull(listener.response); + assertNull(listener.failure); responseChecker.accept(listener.response); verify(indexShard).refresh("refresh_flag_index"); verify(indexShard, never()).addRefreshListener(any(), any()); @@ -125,31 +127,75 @@ public class TransportWriteActionTests extends ESTestCase { boolean forcedRefresh = randomBoolean(); refreshListener.getValue().accept(forcedRefresh); assertNotNull(listener.response); + assertNull(listener.failure); resultChecker.accept(listener.response, forcedRefresh); } - private class TestAction extends TransportWriteAction { + public void testDocumentFailureInShardOperationOnPrimary() throws Exception { + TestRequest request = new TestRequest(); + TestAction testAction = new TestAction(true, true); + TransportWriteAction.WritePrimaryResult writePrimaryResult = + testAction.shardOperationOnPrimary(request, indexShard); + CapturingActionListener listener = new CapturingActionListener<>(); + writePrimaryResult.respond(listener); + assertNull(listener.response); + assertNotNull(listener.failure); + } + + public void testDocumentFailureInShardOperationOnReplica() throws Exception { + TestRequest request = new TestRequest(); + TestAction testAction = new TestAction(randomBoolean(), true); + TransportWriteAction.WriteReplicaResult writeReplicaResult = + testAction.shardOperationOnReplica(request, indexShard); + CapturingActionListener 
listener = new CapturingActionListener<>(); + writeReplicaResult.respond(listener); + assertNull(listener.response); + assertNotNull(listener.failure); + } + + private class TestAction extends TransportWriteAction { + + private final boolean withDocumentFailureOnPrimary; + private final boolean withDocumentFailureOnReplica; + protected TestAction() { + this(false, false); + } + protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentFailureOnReplica) { super(Settings.EMPTY, "test", new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null), null, null, null, null, new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, - ThreadPool.Names.SAME); - } - - @Override - protected WriteResult onPrimaryShard(TestRequest request, IndexShard indexShard) throws Exception { - return new WriteResult<>(new TestResponse(), location); - } - - @Override - protected Location onReplicaShard(TestRequest request, IndexShard indexShard) { - return location; + TestRequest::new, ThreadPool.Names.SAME); + this.withDocumentFailureOnPrimary = withDocumentFailureOnPrimary; + this.withDocumentFailureOnReplica = withDocumentFailureOnReplica; } @Override protected TestResponse newResponseInstance() { return new TestResponse(); } + + @Override + protected WritePrimaryResult shardOperationOnPrimary(TestRequest request, IndexShard primary) throws Exception { + final WritePrimaryResult primaryResult; + if (withDocumentFailureOnPrimary) { + primaryResult = new WritePrimaryResult(request, null, null, new RuntimeException("simulated"), primary); + } else { + primaryResult = new WritePrimaryResult(request, new TestResponse(), location, null, primary); + } + return primaryResult; + } + + @Override + protected WriteReplicaResult shardOperationOnReplica(TestRequest request, IndexShard replica) throws Exception { + final WriteReplicaResult replicaResult; + if (withDocumentFailureOnReplica) { + replicaResult = new WriteReplicaResult(request, null, new RuntimeException("simulated"), replica); + } else { + replicaResult = new WriteReplicaResult(request, location, null, replica); + } + return replicaResult; + } } private static class TestRequest extends ReplicatedWriteRequest { @@ -169,6 +215,7 @@ public class TransportWriteActionTests extends ESTestCase { private static class CapturingActionListener implements ActionListener { private R response; + private Exception failure; @Override public void onResponse(R response) { @@ -176,8 +223,8 @@ public class TransportWriteActionTests extends ESTestCase { } @Override - public void onFailure(Exception e) { - throw new RuntimeException(e); + public void onFailure(Exception failure) { + this.failure = failure; } } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 7bcc00da5ce..3009f7d5c3b 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -82,6 +82,7 @@ import java.util.SortedSet; import java.util.TreeSet; import static org.elasticsearch.test.OldIndexUtils.assertUpgradeWorks; +import static org.elasticsearch.test.OldIndexUtils.getIndexDir; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -445,8 +446,15 @@ public class 
OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { throw new IllegalStateException("Backwards index must contain exactly one cluster"); } - // the bwc scripts packs the indices under this path - return list[0].resolve("nodes/0/"); + int zipIndex = indexFile.indexOf(".zip"); + final Version version = Version.fromString(indexFile.substring("index-".length(), zipIndex)); + if (version.before(Version.V_5_0_0_alpha1)) { + // the bwc scripts pack the indices under this path + return list[0].resolve("nodes/0/"); + } else { + // after 5.0.0, data folders do not include the cluster name + return list[0].resolve("0"); + } } public void testOldClusterStates() throws Exception { @@ -481,9 +489,19 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-"); Path nodeDir = getNodeDir(indexFile); logger.info("Parsing cluster state files from index [{}]", indexName); - assertNotNull(globalFormat.loadLatestState(logger, nodeDir)); // no exception - Path indexDir = nodeDir.resolve("indices").resolve(indexName); - assertNotNull(indexFormat.loadLatestState(logger, indexDir)); // no exception + final MetaData metaData = globalFormat.loadLatestState(logger, nodeDir); + assertNotNull(metaData); + + final Version version = Version.fromString(indexName.substring("index-".length())); + final Path dataDir; + if (version.before(Version.V_5_0_0_alpha1)) { + dataDir = nodeDir.getParent().getParent(); + } else { + dataDir = nodeDir.getParent(); + } + final Path indexDir = getIndexDir(logger, indexName, indexFile, dataDir); + assertNotNull(indexFormat.loadLatestState(logger, indexDir)); } } + } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java index d7ed0d8db5e..ec8c12cb525 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.bwcompat; +import org.elasticsearch.Version; import org.elasticsearch.common.io.FileTestUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; @@ -70,7 +71,12 @@ public class RepositoryUpgradabilityIT extends AbstractSnapshotIntegTestCase { final Set<SnapshotInfo> snapshotInfos = Sets.newHashSet(getSnapshots(repoName)); assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo originalSnapshot = snapshotInfos.iterator().next(); - assertThat(originalSnapshot.snapshotId(), equalTo(new SnapshotId("test_1", "test_1"))); + if (Version.fromString(version).before(Version.V_5_0_0_alpha1)) { + assertThat(originalSnapshot.snapshotId(), equalTo(new SnapshotId("test_1", "test_1"))); + } else { + assertThat(originalSnapshot.snapshotId().getName(), equalTo("test_1")); + assertNotNull(originalSnapshot.snapshotId().getUUID()); // it's a random UUID now + } assertThat(Sets.newHashSet(originalSnapshot.indices()), equalTo(indices)); logger.info("--> restore the original snapshot"); diff --git a/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java index 9d2c176dffb..bc771f5721d 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++
b/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java @@ -19,10 +19,7 @@ package org.elasticsearch.client.transport; -import com.carrotsearch.randomizedtesting.generators.RandomInts; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.Lifecycle; @@ -37,7 +34,6 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportServiceAdapter; import java.io.IOException; @@ -83,7 +79,7 @@ abstract class FailAndRetryMockTransport imp //once nodes are connected we'll just return errors for each sendRequest call triedNodes.add(node); - if (RandomInts.randomInt(random, 100) > 10) { + if (random.nextInt(100) > 10) { connectTransportExceptions.incrementAndGet(); throw new ConnectTransportException(node, "node not available"); } else { diff --git a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java index 86b64febb04..fac8d5f7b63 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -219,7 +219,7 @@ public class NoMasterNodeIT extends ESIntegTestCase { ensureSearchable("test1", "test2"); ClusterStateResponse clusterState = client().admin().cluster().prepareState().get(); - logger.info("Cluster state:\n{}", clusterState.getState().prettyPrint()); + logger.info("Cluster state:\n{}", clusterState.getState()); internalCluster().stopRandomDataNode(); assertTrue(awaitBusy(() -> { diff --git a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index 599b62b1ee2..863349e897a 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -84,19 +84,19 @@ public class NodeConnectionsServiceTests extends ESTestCase { ClusterState current = clusterStateFromNodes(Collections.emptyList()); ClusterChangedEvent event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current); - service.connectToAddedNodes(event); + service.connectToNodes(event.nodesDelta().addedNodes()); assertConnected(event.nodesDelta().addedNodes()); - service.disconnectFromRemovedNodes(event); + service.disconnectFromNodes(event.nodesDelta().removedNodes()); assertConnectedExactlyToNodes(event.state()); current = event.state(); event = new ClusterChangedEvent("test", clusterStateFromNodes(randomSubsetOf(nodes)), current); - service.connectToAddedNodes(event); + service.connectToNodes(event.nodesDelta().addedNodes()); assertConnected(event.nodesDelta().addedNodes()); - service.disconnectFromRemovedNodes(event); + service.disconnectFromNodes(event.nodesDelta().removedNodes()); assertConnectedExactlyToNodes(event.state()); } @@ -110,7 +110,7 @@ public class NodeConnectionsServiceTests extends ESTestCase { transport.randomConnectionExceptions = true; - 
service.connectToAddedNodes(event); + service.connectToNodes(event.nodesDelta().addedNodes()); for (int i = 0; i < 3; i++) { // simulate disconnects diff --git a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java index 1dd1abf4ce1..eb5c88d7e83 100644 --- a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java @@ -277,9 +277,9 @@ public class ClusterStateHealthTests extends ESTestCase { // if the inactive primaries are due solely to recovery (not failed allocation or previously being allocated) // then cluster health is YELLOW, otherwise RED if (primaryInactiveDueToRecovery(indexName, clusterState)) { - assertThat("clusterState is:\n" + clusterState.prettyPrint(), health.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); + assertThat("clusterState is:\n" + clusterState, health.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); } else { - assertThat("clusterState is:\n" + clusterState.prettyPrint(), health.getStatus(), equalTo(ClusterHealthStatus.RED)); + assertThat("clusterState is:\n" + clusterState, health.getStatus(), equalTo(ClusterHealthStatus.RED)); } } } diff --git a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java index a16520faeb1..342919fb881 100644 --- a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java @@ -135,8 +135,8 @@ public class DiscoveryNodesTests extends ESTestCase { final DiscoveryNodes discoNodesA = builderA.build(); final DiscoveryNodes discoNodesB = builderB.build(); - logger.info("nodes A: {}", discoNodesA.prettyPrint()); - logger.info("nodes B: {}", discoNodesB.prettyPrint()); + logger.info("nodes A: {}", discoNodesA); + logger.info("nodes B: {}", discoNodesB); DiscoveryNodes.Delta delta = discoNodesB.delta(discoNodesA); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 6243f138380..0d284a1e47e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -152,7 +152,9 @@ public class PrimaryAllocationIT extends ESIntegTestCase { client().admin().cluster().prepareReroute().add(new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true)).get(); logger.info("--> wait until shard is failed and becomes unassigned again"); - assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned())); + assertBusy(() -> + assertTrue(client().admin().cluster().prepareState().get().getState().toString(), + client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned())); assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").getShards().get(0).primaryShard().unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 60f1688ad3d..4fffcebc79b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -306,7 +306,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build(); // make sure both replicas are marked as delayed (i.e. not reallocated) clusterState = allocation.deassociateDeadNodes(clusterState, true, "reroute"); - assertThat(clusterState.prettyPrint(), UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(2)); + assertThat(clusterState.toString(), UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(2)); } public void testFindNextDelayedAllocation() { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index ed7a944963d..e658ff03a18 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -89,7 +89,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { clusterState = addNodes(clusterState, service, 1, nodeOffset++); assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2)); assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(0)); - logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint()); + logger.debug("ClusterState: {}", clusterState.getRoutingNodes()); } public void testMinimalRelocations() { @@ -150,7 +150,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); assertThat(newState, equalTo(clusterState)); assertNumIndexShardsPerNode(clusterState, equalTo(2)); - logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint()); + logger.debug("ClusterState: {}", clusterState.getRoutingNodes()); } public void testMinimalRelocationsNoLimit() { @@ -212,7 +212,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); assertThat(newState, equalTo(clusterState)); assertNumIndexShardsPerNode(clusterState, equalTo(2)); - logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint()); + logger.debug("ClusterState: {}", clusterState.getRoutingNodes()); } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java new file mode 100644 index 00000000000..806e136bba3 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java @@ -0,0 +1,276 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.Balancer; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.NodeRebalanceDecision; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.RebalanceDecision; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.hamcrest.Matchers.startsWith; + +/** + * Tests for balancing a single shard, see {@link Balancer#decideRebalance(ShardRouting)}. 
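+ *
+ * A minimal usage sketch of the API exercised here (illustrative only; {@code shard} and
+ * {@code routingAllocation} are assumed to be built as in the tests below):
+ * <pre>{@code
+ * BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY);
+ * RebalanceDecision decision = allocator.decideRebalance(shard, routingAllocation);
+ * Type outcome = decision.getFinalDecisionType();     // YES, NO or THROTTLE
+ * String targetNodeId = decision.getAssignedNodeId(); // non-null only for a YES decision
+ * }</pre>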
+ */ +public class BalancedSingleShardTests extends ESAllocationTestCase { + + public void testRebalanceNonStartedShardNotAllowed() { + BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY); + ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), + randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.UNASSIGNED, ShardRoutingState.RELOCATING)); + ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard(); + RebalanceDecision rebalanceDecision = allocator.decideRebalance(shard, newRoutingAllocation( + new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), clusterState)); + assertSame(RebalanceDecision.NOT_TAKEN, rebalanceDecision); + } + + public void testRebalanceNotAllowedDuringPendingAsyncFetch() { + BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY); + ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED); + ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard(); + RoutingAllocation routingAllocation = newRoutingAllocation( + new AllocationDeciders(Settings.EMPTY, Collections.emptyList()), clusterState); + routingAllocation.setHasPendingAsyncFetch(); + RebalanceDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation); + assertNotNull(rebalanceDecision.getCanRebalanceDecision()); + assertEquals(Type.NO, rebalanceDecision.getFinalDecisionType()); + assertThat(rebalanceDecision.getFinalExplanation(), startsWith("cannot rebalance due to in-flight shard store fetches")); + assertNull(rebalanceDecision.getNodeDecisions()); + assertNull(rebalanceDecision.getAssignedNodeId()); + + assertAssignedNodeRemainsSame(allocator, routingAllocation, shard); + } + + public void testRebalancingNotAllowedDueToCanRebalance() { + final Decision canRebalanceDecision = randomFrom(Decision.NO, Decision.THROTTLE); + AllocationDecider noRebalanceDecider = new AllocationDecider(Settings.EMPTY) { + @Override + public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { + return allocation.decision(canRebalanceDecision, "TEST", "foobar"); + } + }; + BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY); + ClusterState clusterState = ClusterStateCreationUtils.state("idx", randomBoolean(), ShardRoutingState.STARTED); + ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard(); + RoutingAllocation routingAllocation = newRoutingAllocation( + new AllocationDeciders(Settings.EMPTY, Collections.singleton(noRebalanceDecider)), clusterState); + RebalanceDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation); + assertEquals(canRebalanceDecision.type(), rebalanceDecision.getCanRebalanceDecision().type()); + assertEquals(canRebalanceDecision.type(), rebalanceDecision.getFinalDecisionType()); + assertEquals("rebalancing is not allowed", rebalanceDecision.getFinalExplanation()); + assertNotNull(rebalanceDecision.getNodeDecisions()); + assertNull(rebalanceDecision.getAssignedNodeId()); + assertEquals(1, rebalanceDecision.getCanRebalanceDecision().getDecisions().size()); + for (Decision subDecision : rebalanceDecision.getCanRebalanceDecision().getDecisions()) { + assertEquals("foobar", ((Decision.Single) subDecision).getExplanation()); + } + + assertAssignedNodeRemainsSame(allocator, routingAllocation, shard); + } + + public void testRebalancePossible() { + AllocationDecider 
canAllocateDecider = new AllocationDecider(Settings.EMPTY) { + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + return Decision.YES; + } + }; + Tuple rebalance = setupStateAndRebalance(canAllocateDecider, Settings.EMPTY, true); + ClusterState clusterState = rebalance.v1(); + RebalanceDecision rebalanceDecision = rebalance.v2(); + assertEquals(Type.YES, rebalanceDecision.getCanRebalanceDecision().type()); + assertEquals(Type.YES, rebalanceDecision.getFinalDecisionType()); + assertNotNull(rebalanceDecision.getFinalExplanation()); + assertEquals(clusterState.nodes().getSize() - 1, rebalanceDecision.getNodeDecisions().size()); + assertNotNull(rebalanceDecision.getAssignedNodeId()); + } + + public void testRebalancingNotAllowedDueToCanAllocate() { + AllocationDecider canAllocateDecider = new AllocationDecider(Settings.EMPTY) { + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + return Decision.NO; + } + }; + Tuple rebalance = setupStateAndRebalance(canAllocateDecider, Settings.EMPTY, false); + ClusterState clusterState = rebalance.v1(); + RebalanceDecision rebalanceDecision = rebalance.v2(); + assertEquals(Type.YES, rebalanceDecision.getCanRebalanceDecision().type()); + assertEquals(Type.NO, rebalanceDecision.getFinalDecisionType()); + assertThat(rebalanceDecision.getFinalExplanation(), + startsWith("cannot rebalance shard, no other node exists that would form a more balanced")); + assertEquals(clusterState.nodes().getSize() - 1, rebalanceDecision.getNodeDecisions().size()); + assertNull(rebalanceDecision.getAssignedNodeId()); + } + + public void testDontBalanceShardWhenThresholdNotMet() { + AllocationDecider canAllocateDecider = new AllocationDecider(Settings.EMPTY) { + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + return Decision.YES; + } + }; + // ridiculously high threshold setting so we won't rebalance + Settings balancerSettings = Settings.builder().put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 1000f).build(); + Tuple rebalance = setupStateAndRebalance(canAllocateDecider, balancerSettings, false); + ClusterState clusterState = rebalance.v1(); + RebalanceDecision rebalanceDecision = rebalance.v2(); + assertEquals(Type.YES, rebalanceDecision.getCanRebalanceDecision().type()); + assertEquals(Type.NO, rebalanceDecision.getFinalDecisionType()); + assertNotNull(rebalanceDecision.getFinalExplanation()); + assertEquals(clusterState.nodes().getSize() - 1, rebalanceDecision.getNodeDecisions().size()); + assertNull(rebalanceDecision.getAssignedNodeId()); + } + + public void testSingleShardBalanceProducesSameResultsAsBalanceStep() { + final String[] indices = { "idx1", "idx2" }; + // Create a cluster state with 2 indices, each with 1 started primary shard, and only + // one node initially so that all primary shards get allocated to the same node. We are only + // using 2 indices (i.e. 2 total primary shards) because if we have any more than 2 started shards + // in the routing table, then we have no guarantees about the order in which the 3 or more shards + // are selected to be rebalanced to the new node, and hence the node to which they are rebalanced + // is not deterministic. 
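+ // (for example, with three or more started shards and several empty nodes, the balancer
+ // could legitimately move any one of the shards first, so the relocation target of a given
+ // shard would vary from run to run)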
Using only two shards guarantees that only one of those two shards will + // be rebalanced, and so we pick the one that was chosen to be rebalanced and execute the single-shard + // rebalance step on it to make sure it gets assigned to the same node. + ClusterState clusterState = ClusterStateCreationUtils.state(1, indices, 1); + // add new nodes so one of the primaries can be rebalanced + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterState.nodes()); + int numAddedNodes = randomIntBetween(1, 5); + // randomly select a subset of the newly added nodes to set filter allocation on (but not all) + int excludeNodesSize = randomIntBetween(0, numAddedNodes - 1); + final Set excludeNodes = new HashSet<>(); + for (int i = 0; i < numAddedNodes; i++) { + DiscoveryNode discoveryNode = newNode(randomAsciiOfLength(7)); + nodesBuilder.add(discoveryNode); + if (i < excludeNodesSize) { + excludeNodes.add(discoveryNode.getId()); + } + } + clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build(); + + AllocationDecider allocationDecider = new AllocationDecider(Settings.EMPTY) { + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + if (excludeNodes.contains(node.nodeId())) { + return Decision.NO; + } + return Decision.YES; + } + }; + AllocationDecider rebalanceDecider = new AllocationDecider(Settings.EMPTY) { + @Override + public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { + return Decision.YES; + } + }; + List allocationDeciders = Arrays.asList(rebalanceDecider, allocationDecider); + RoutingAllocation routingAllocation = newRoutingAllocation( + new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState); + // allocate and get the node that is now relocating + BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY); + allocator.allocate(routingAllocation); + ShardRouting shardToRebalance = null; + for (RoutingNode routingNode : routingAllocation.routingNodes()) { + List relocatingShards = routingNode.shardsWithState(ShardRoutingState.RELOCATING); + if (relocatingShards.size() > 0) { + shardToRebalance = randomFrom(relocatingShards); + break; + } + } + + routingAllocation = newRoutingAllocation(new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState); + routingAllocation.debugDecision(true); + ShardRouting shard = clusterState.getRoutingNodes().activePrimary(shardToRebalance.shardId()); + RebalanceDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation); + assertEquals(shardToRebalance.relocatingNodeId(), rebalanceDecision.getAssignedNodeId()); + // make sure all excluded nodes returned a NO decision + for (String excludedNode : excludeNodes) { + NodeRebalanceDecision nodeRebalanceDecision = rebalanceDecision.getNodeDecisions().get(excludedNode); + assertEquals(Type.NO, nodeRebalanceDecision.getCanAllocateDecision().type()); + } + } + + private Tuple setupStateAndRebalance(AllocationDecider allocationDecider, + Settings balancerSettings, + boolean rebalanceExpected) { + AllocationDecider rebalanceDecider = new AllocationDecider(Settings.EMPTY) { + @Override + public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { + return Decision.YES; + } + }; + List allocationDeciders = Arrays.asList(rebalanceDecider, allocationDecider); + final int numShards = randomIntBetween(8, 13); + BalancedShardsAllocator allocator = new BalancedShardsAllocator(balancerSettings); + 
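+ // (note: ClusterStateCreationUtils.state("idx", 2, numShards) is assumed here to create a
+ // 2-node cluster holding numShards primaries of "idx", so the empty node added below is an
+ // attractive rebalance target unless the balancer threshold forbids the move)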
ClusterState clusterState = ClusterStateCreationUtils.state("idx", 2, numShards); + // add a new node so shards can be rebalanced there + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterState.nodes()); + nodesBuilder.add(newNode(randomAsciiOfLength(7))); + clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build(); + ShardRouting shard = clusterState.routingTable().index("idx").shard(0).primaryShard(); + RoutingAllocation routingAllocation = newRoutingAllocation( + new AllocationDeciders(Settings.EMPTY, allocationDeciders), clusterState); + RebalanceDecision rebalanceDecision = allocator.decideRebalance(shard, routingAllocation); + + if (rebalanceExpected == false) { + assertAssignedNodeRemainsSame(allocator, routingAllocation, shard); + } + + return Tuple.tuple(clusterState, rebalanceDecision); + } + + private RoutingAllocation newRoutingAllocation(AllocationDeciders deciders, ClusterState state) { + RoutingAllocation allocation = new RoutingAllocation( + deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime(), false + ); + allocation.debugDecision(true); + return allocation; + } + + private void assertAssignedNodeRemainsSame(BalancedShardsAllocator allocator, RoutingAllocation routingAllocation, + ShardRouting originalRouting) { + allocator.allocate(routingAllocation); + RoutingNodes routingNodes = routingAllocation.routingNodes(); + // make sure the previous node id is the same as the current one after rerouting + assertEquals(originalRouting.currentNodeId(), routingNodes.activePrimary(originalRouting.shardId()).currentNodeId()); + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index a56af9f2b39..f48c9acb356 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -366,7 +366,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { } private ClusterState stabilize(ClusterState clusterState, AllocationService service) { - logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint()); + logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes()); clusterState = service.deassociateDeadNodes(clusterState, true, "reroute"); RoutingNodes routingNodes = clusterState.getRoutingNodes(); @@ -375,7 +375,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { logger.info("complete rebalancing"); boolean changed; do { - logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint()); + logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes()); ClusterState newState = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)); changed = newState.equals(clusterState) == false; clusterState = newState; @@ -386,7 +386,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { } private void assertRecoveryNodeVersions(RoutingNodes routingNodes) { - logger.trace("RoutingNodes: {}", routingNodes.prettyPrint()); + logger.trace("RoutingNodes: {}", routingNodes); List mutableShardRoutings = routingNodes.shardsWithState(ShardRoutingState.RELOCATING); for (ShardRouting r : mutableShardRoutings) { diff --git 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index 6722e048030..23992b91541 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -159,7 +159,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase { } while (clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size() != 0 || clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size() != 0 && iterations < 200); - logger.info("Done Balancing after [{}] iterations. State:\n{}", iterations, clusterState.prettyPrint()); + logger.info("Done Balancing after [{}] iterations. State:\n{}", iterations, clusterState); // we stop after 200 iterations; if it didn't stabilize by then, something is likely to be wrong assertThat("max num iteration exceeded", iterations, Matchers.lessThan(200)); assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(0)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java index 454e8410484..74d3dda8e36 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java @@ -70,15 +70,14 @@ public class StartedShardsRoutingTests extends ESAllocationTestCase { logger.info("--> test starting of shard"); ClusterState newState = allocation.applyStartedShards(state, Arrays.asList(initShard)); - assertThat("failed to start " + initShard + "\ncurrent routing table:" + newState.routingTable().prettyPrint(), - newState, not(equalTo(state))); - assertTrue(initShard + "isn't started \ncurrent routing table:" + newState.routingTable().prettyPrint(), + assertThat("failed to start " + initShard + "\ncurrent routing table:" + newState.routingTable(), newState, not(equalTo(state))); + assertTrue(initShard + " isn't started \ncurrent routing table:" + newState.routingTable(), + newState.routingTable().index("test").shard(initShard.id()).allShardsStarted()); state = newState; logger.info("--> testing starting of relocating shards"); newState = allocation.applyStartedShards(state, Arrays.asList(relocatingShard.getTargetRelocatingShard())); - assertThat("failed to start " + relocatingShard + "\ncurrent routing table:" + newState.routingTable().prettyPrint(), + assertThat("failed to start " + relocatingShard + "\ncurrent routing table:" + newState.routingTable(), newState, not(equalTo(state))); ShardRouting shardRouting = newState.routingTable().index("test").shard(relocatingShard.id()).getShards().get(0); assertThat(shardRouting.state(), equalTo(ShardRoutingState.STARTED)); diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index 3f1e8f032ca..00d9a8ff096 100644 --- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -22,6 +22,7 @@ package 
org.elasticsearch.cluster.serialization; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.cluster.ESAllocationTestCase; import static org.hamcrest.Matchers.equalTo; @@ -56,7 +56,7 @@ public class ClusterSerializationTests extends ESAllocationTestCase { assertThat(serializedClusterState.getClusterName().value(), equalTo(clusterState.getClusterName().value())); - assertThat(serializedClusterState.routingTable().prettyPrint(), equalTo(clusterState.routingTable().prettyPrint())); + assertThat(serializedClusterState.routingTable().toString(), equalTo(clusterState.routingTable().toString())); } public void testRoutingTableSerialization() throws Exception { @@ -81,7 +81,7 @@ public class ClusterSerializationTests extends ESAllocationTestCase { StreamInput inStream = outStream.bytes().streamInput(); RoutingTable target = RoutingTable.Builder.readFrom(inStream); - assertThat(target.prettyPrint(), equalTo(source.prettyPrint())); + assertThat(target.toString(), equalTo(source.toString())); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java index 69d18933e6c..9ce3d1fcee8 100644 --- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.serialization; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -28,8 +29,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.cluster.ESAllocationTestCase; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -55,7 +56,7 @@ public class ClusterStateToStringTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(); clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState, "reroute").routingTable()).build(); - String clusterStateString = clusterState.toString(); + String clusterStateString = Strings.toString(clusterState, true); assertNotNull(clusterStateString); assertThat(clusterStateString, containsString("test_idx")); diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java 
b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java index c12b54e71ef..a39bcf38391 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java @@ -41,6 +41,8 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.discovery.Discovery; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -119,12 +121,12 @@ public class ClusterServiceTests extends ESTestCase { emptySet(), Version.CURRENT)); timedClusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { @Override - public void connectToAddedNodes(ClusterChangedEvent event) { + public void connectToNodes(List addedNodes) { // skip } @Override - public void disconnectFromRemovedNodes(ClusterChangedEvent event) { + public void disconnectFromNodes(List removedNodes) { // skip } }); @@ -970,6 +972,70 @@ public class ClusterServiceTests extends ESTestCase { mockAppender.assertAllExpectationsMatched(); } + public void testDisconnectFromNewlyAddedNodesIfClusterStatePublishingFails() throws InterruptedException { + TimedClusterService timedClusterService = new TimedClusterService(Settings.builder().put("cluster.name", + "ClusterServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool); + timedClusterService.setLocalNode(new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), + emptySet(), Version.CURRENT)); + Set currentNodes = Collections.synchronizedSet(new HashSet<>()); + currentNodes.add(timedClusterService.localNode()); + timedClusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { + @Override + public void connectToNodes(List addedNodes) { + currentNodes.addAll(addedNodes); + } + + @Override + public void disconnectFromNodes(List removedNodes) { + currentNodes.removeAll(removedNodes); + } + }); + AtomicBoolean failToCommit = new AtomicBoolean(); + timedClusterService.setClusterStatePublisher((event, ackListener) -> { + if (failToCommit.get()) { + throw new Discovery.FailedToCommitClusterStateException("just to test this"); + } + }); + timedClusterService.start(); + ClusterState state = timedClusterService.state(); + final DiscoveryNodes nodes = state.nodes(); + final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes) + .masterNodeId(nodes.getLocalNodeId()); + state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) + .nodes(nodesBuilder).build(); + setState(timedClusterService, state); + + assertThat(currentNodes, equalTo(Sets.newHashSet(timedClusterService.state().getNodes()))); + + final CountDownLatch latch = new CountDownLatch(1); + + // try to add node when cluster state publishing fails + failToCommit.set(true); + timedClusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + DiscoveryNode newNode = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), + emptySet(), Version.CURRENT); + return 
ClusterState.builder(currentState).nodes(DiscoveryNodes.builder(currentState.nodes()).add(newNode)).build(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Exception e) { + latch.countDown(); + } + }); + + latch.await(); + assertThat(currentNodes, equalTo(Sets.newHashSet(timedClusterService.state().getNodes()))); + timedClusterService.close(); + } + private static class SimpleTask { private final int id; diff --git a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index 44763d5ccce..f51a85b2f9a 100644 --- a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -238,6 +238,9 @@ public class BytesStreamsTests extends ESTestCase { assertEquals(position, out.position()); assertEquals(position, BytesReference.toBytes(out.bytes()).length); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> out.seek(Integer.MAX_VALUE + 1L)); + assertEquals("BytesStreamOutput cannot hold more than 2GB of data", iae.getMessage()); + out.close(); } @@ -251,6 +254,9 @@ public class BytesStreamsTests extends ESTestCase { out.skip(forward); assertEquals(position + forward, out.position()); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> out.skip(Integer.MAX_VALUE - 50)); + assertEquals("BytesStreamOutput cannot hold more than 2GB of data", iae.getMessage()); + out.close(); } diff --git a/core/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java b/core/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java index 505196a97f6..ca9a6b3a1ab 100644 --- a/core/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java +++ b/core/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java @@ -82,8 +82,24 @@ public class DateMathParserTests extends ESTestCase { // timezone works within date format assertDateMathEquals("2014-05-30T20:21+02:00", "2014-05-30T18:21:00.000"); + // test alternative ways of writing zero offsets, according to ISO 8601 +00:00, +00, +0000 should work. 
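+ // (for example, "2014-05-30T18:21+00:00", "2014-05-30T18:21+00" and "2014-05-30T18:21+0000"
+ // are all expected to parse to the same instant, 2014-05-30T18:21:00.000 UTC, as asserted below)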
+ // joda also seems to allow for -00:00, -00, -0000 + assertDateMathEquals("2014-05-30T18:21+00:00", "2014-05-30T18:21:00.000"); + assertDateMathEquals("2014-05-30T18:21+00", "2014-05-30T18:21:00.000"); + assertDateMathEquals("2014-05-30T18:21+0000", "2014-05-30T18:21:00.000"); + assertDateMathEquals("2014-05-30T18:21-00:00", "2014-05-30T18:21:00.000"); + assertDateMathEquals("2014-05-30T18:21-00", "2014-05-30T18:21:00.000"); + assertDateMathEquals("2014-05-30T18:21-0000", "2014-05-30T18:21:00.000"); + // but also externally assertDateMathEquals("2014-05-30T20:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("+02:00")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("+00:00")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("+00")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("+0000")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("-00:00")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("-00")); + assertDateMathEquals("2014-05-30T18:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("-0000")); // and timezone in the date has priority assertDateMathEquals("2014-05-30T20:21+03:00", "2014-05-30T17:21:00.000", 0, false, DateTimeZone.forID("-08:00")); diff --git a/core/src/test/java/org/elasticsearch/common/util/ExtensionPointTests.java b/core/src/test/java/org/elasticsearch/common/util/ExtensionPointTests.java deleted file mode 100644 index 8fabbcc60ae..00000000000 --- a/core/src/test/java/org/elasticsearch/common/util/ExtensionPointTests.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.common.util; - -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; - -import org.elasticsearch.common.inject.Binder; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.inject.Injector; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.inject.ModulesBuilder; -import org.elasticsearch.test.ESTestCase; - -public class ExtensionPointTests extends ESTestCase { - - public void testClassSet() { - final ExtensionPoint.ClassSet allocationDeciders = new ExtensionPoint.ClassSet<>("test_class", TestBaseClass.class, Consumer.class); - allocationDeciders.registerExtension(TestImpl.class); - Injector injector = new ModulesBuilder().add(new Module() { - @Override - public void configure(Binder binder) { - allocationDeciders.bind(binder); - } - }).createInjector(); - assertEquals(1, TestImpl.instances.get()); - - } - - public static class TestBaseClass {} - - public static class Consumer { - @Inject - public Consumer(Set deciders, TestImpl other) { - // we require the TestImpl more than once to ensure it's bound as a singleton - } - } - - public static class TestImpl extends TestBaseClass { - static final AtomicInteger instances = new AtomicInteger(0); - - @Inject - public TestImpl() { - instances.incrementAndGet(); - } - } -} diff --git a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java index 94fc0d88752..916926e36a4 100644 --- a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java @@ -211,11 +211,11 @@ public class IndexFolderUpgraderTests extends ESTestCase { throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length); } // the bwc scripts packs the indices under this path - Path src = list[0].resolve("nodes/0/indices/" + indexName); + Path src = OldIndexUtils.getIndexDir(logger, indexName, path.getFileName().toString(), list[0]); assertTrue("[" + path + "] missing index dir: " + src.toString(), Files.exists(src)); final Path indicesPath = randomFrom(nodeEnvironment.nodePaths()).indicesPath; logger.info("--> injecting index [{}] into [{}]", indexName, indicesPath); - OldIndexUtils.copyIndex(logger, src, indexName, indicesPath); + OldIndexUtils.copyIndex(logger, src, src.getFileName().toString(), indicesPath); IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnvironment); // ensure old index folder is deleted diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index 6f0aeca9d77..28775defe45 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -76,8 +76,9 @@ public class DiscoveryModuleTests extends ModuleTestCase { public void testUnknownHostsProvider() { Settings settings = Settings.builder().put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "dne").build(); - DiscoveryModule module = new DiscoveryModule(settings, null, null, Collections.emptyList()); - assertBindingFailure(module, "Unknown zen hosts provider"); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + new DiscoveryModule(settings, null, null, Collections.emptyList())); + 
assertEquals("Unknown zen hosts provider [dne]", e.getMessage()); } public void testDuplicateHostsProvider() { diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index 2ea675ab3f7..ca4b1c9b120 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -50,13 +50,12 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.zen.ElectMasterService; -import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.FaultDetection; import org.elasticsearch.discovery.zen.MembershipAction; -import org.elasticsearch.discovery.zen.ZenPing; -import org.elasticsearch.discovery.zen.ZenPingService; -import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.discovery.zen.PublishClusterStateAction; +import org.elasticsearch.discovery.zen.UnicastZenPing; +import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.discovery.zen.ZenPing; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.indices.store.IndicesStoreIntegrationIT; import org.elasticsearch.monitor.jvm.HotThreads; @@ -153,11 +152,31 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { return 1; } + private boolean disableBeforeIndexDeletion; + + @Before + public void setUp() throws Exception { + super.setUp(); + disableBeforeIndexDeletion = false; + } + @Override - protected void beforeIndexDeletion() { - // some test may leave operations in flight - // this is because the disruption schemes swallow requests by design - // as such, these operations will never be marked as finished + public void setDisruptionScheme(ServiceDisruptionScheme scheme) { + if (scheme instanceof NetworkDisruption && + ((NetworkDisruption) scheme).getNetworkLinkDisruptionType() instanceof NetworkUnresponsive) { + // the network unresponsive disruption may leave operations in flight + // this is because this disruption scheme swallows requests by design + // as such, these operations will never be marked as finished + disableBeforeIndexDeletion = true; + } + super.setDisruptionScheme(scheme); + } + + @Override + protected void beforeIndexDeletion() throws IOException { + if (disableBeforeIndexDeletion == false) { + super.beforeIndexDeletion(); + } } private List startCluster(int numberOfNodes) throws ExecutionException, InterruptedException { @@ -175,12 +194,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ensureStableCluster(numberOfNodes); // TODO: this is a temporary solution so that nodes will not base their reaction to a partition based on previous successful results - for (ZenPingService pingService : internalCluster().getInstances(ZenPingService.class)) { - for (ZenPing zenPing : pingService.zenPings()) { - if (zenPing instanceof UnicastZenPing) { - ((UnicastZenPing) zenPing).clearTemporalResponses(); - } - } + ZenPing zenPing = internalCluster().getInstance(ZenPing.class); + if (zenPing instanceof UnicastZenPing) { + ((UnicastZenPing) zenPing).clearTemporalResponses(); } return nodes; } @@ -361,7 +377,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } if (!success) { fail("node [" + node + "] has no master or 
has blocks, despite being on the right side of the partition. State dump:\n" - + nodeState.prettyPrint()); + + nodeState); } } @@ -448,13 +464,13 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { assertEquals("unequal node count", state.nodes().getSize(), nodeState.nodes().getSize()); assertEquals("different masters ", state.nodes().getMasterNodeId(), nodeState.nodes().getMasterNodeId()); assertEquals("different meta data version", state.metaData().version(), nodeState.metaData().version()); - if (!state.routingTable().prettyPrint().equals(nodeState.routingTable().prettyPrint())) { + if (!state.routingTable().toString().equals(nodeState.routingTable().toString())) { fail("different routing"); } } catch (AssertionError t) { fail("failed comparing cluster state: " + t.getMessage() + "\n" + - "--- cluster state of node [" + nodes.get(0) + "]: ---\n" + state.prettyPrint() + - "\n--- cluster state [" + node + "]: ---\n" + nodeState.prettyPrint()); + "--- cluster state of node [" + nodes.get(0) + "]: ---\n" + state + + "\n--- cluster state [" + node + "]: ---\n" + nodeState); } } @@ -747,15 +763,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); // Make sure that the end state is consistent on all nodes: assertDiscoveryCompleted(nodes); - // Use assertBusy(...) because the unfrozen node may take a while to actually join the cluster. - // The assertDiscoveryCompleted(...) can't know if all nodes have the old master node in all of the local cluster states - assertBusy(new Runnable() { - @Override - public void run() { - assertMaster(newMasterNode, nodes); - } - }); - + assertMaster(newMasterNode, nodes); assertThat(masters.size(), equalTo(2)); for (Map.Entry>> entry : masters.entrySet()) { @@ -846,10 +854,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Forcefully clean temporal response lists on all nodes. Otherwise the node in the unicast host list // includes all the other nodes that have pinged it and the issue doesn't manifest - for (ZenPingService pingService : internalCluster().getInstances(ZenPingService.class)) { - for (ZenPing zenPing : pingService.zenPings()) { - ((UnicastZenPing) zenPing).clearTemporalResponses(); - } + ZenPing zenPing = internalCluster().getInstance(ZenPing.class); + if (zenPing instanceof UnicastZenPing) { + ((UnicastZenPing) zenPing).clearTemporalResponses(); } // Simulate a network issue between the unlucky node and elected master node in both directions. @@ -884,10 +891,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Forcefully clean temporal response lists on all nodes. 
Otherwise the node in the unicast host list // includes all the other nodes that have pinged it and the issue doesn't manifest - for (ZenPingService pingService : internalCluster().getInstances(ZenPingService.class)) { - for (ZenPing zenPing : pingService.zenPings()) { - ((UnicastZenPing) zenPing).clearTemporalResponses(); - } + ZenPing zenPing = internalCluster().getInstance(ZenPing.class); + if (zenPing instanceof UnicastZenPing) { + ((UnicastZenPing) zenPing).clearTemporalResponses(); } // Simulate a network issue between the unicast target node and the rest of the cluster @@ -1255,7 +1261,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { final ClusterState state = client().admin().cluster().prepareState().get().getState(); if (state.metaData().hasIndex("test") == false) { - fail("index 'test' was lost. current cluster state: " + state.prettyPrint()); + fail("index 'test' was lost. current cluster state: " + state); } } @@ -1352,14 +1358,16 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { }, 10, TimeUnit.SECONDS); } - private void assertMaster(String masterNode, List nodes) { - for (String node : nodes) { - ClusterState state = getNodeClusterState(node); - String failMsgSuffix = "cluster_state:\n" + state.prettyPrint(); - assertThat("wrong node count on [" + node + "]. " + failMsgSuffix, state.nodes().getSize(), equalTo(nodes.size())); - String otherMasterNodeName = state.nodes().getMasterNode() != null ? state.nodes().getMasterNode().getName() : null; - assertThat("wrong master on node [" + node + "]. " + failMsgSuffix, otherMasterNodeName, equalTo(masterNode)); - } + private void assertMaster(String masterNode, List nodes) throws Exception { + assertBusy(() -> { + for (String node : nodes) { + ClusterState state = getNodeClusterState(node); + String failMsgSuffix = "cluster_state:\n" + state; + assertThat("wrong node count on [" + node + "]. " + failMsgSuffix, state.nodes().getSize(), equalTo(nodes.size())); + String otherMasterNodeName = state.nodes().getMasterNode() != null ? state.nodes().getMasterNode().getName() : null; + assertThat("wrong master on node [" + node + "]. 
" + failMsgSuffix, otherMasterNodeName, equalTo(masterNode)); + } + }); } private void assertDiscoveryCompleted(List nodes) throws InterruptedException { diff --git a/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryIT.java index 14561f255cf..3af2e32eefa 100644 --- a/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryIT.java @@ -66,7 +66,7 @@ public class ZenUnicastDiscoveryIT extends ESIntegTestCase { internalCluster().startNodesAsync(currentNumNodes - unicastHostOrdinals.length).get(); if (client().admin().cluster().prepareHealth().setWaitForNodes("" + currentNumNodes).get().isTimedOut()) { - logger.info("cluster forming timed out, cluster state:\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint()); + logger.info("cluster forming timed out, cluster state:\n{}", client().admin().cluster().prepareState().get().getState()); fail("timed out waiting for cluster to form with [" + currentNumNodes + "] nodes"); } } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index eb580716622..95fcb88a7ea 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -676,10 +676,10 @@ public class NodeJoinControllerTests extends ESTestCase { protected void assertNodesInCurrentState(List expectedNodes) { final ClusterState state = clusterService.state(); - logger.info("assert for [{}] in:\n{}", expectedNodes, state.prettyPrint()); + logger.info("assert for [{}] in:\n{}", expectedNodes, state); DiscoveryNodes discoveryNodes = state.nodes(); for (DiscoveryNode node : expectedNodes) { - assertThat("missing " + node + "\n" + discoveryNodes.prettyPrint(), discoveryNodes.get(node.getId()), equalTo(node)); + assertThat("missing " + node + "\n" + discoveryNodes, discoveryNodes.get(node.getId()), equalTo(node)); } assertThat(discoveryNodes.getSize(), equalTo(expectedNodes.size())); } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java index eb8153c8354..e1d2a226a02 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java @@ -41,8 +41,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; -import org.elasticsearch.discovery.zen.PublishClusterStateAction; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; @@ -852,8 +850,8 @@ public class PublishClusterStateActionTests extends ESTestCase { void assertSameState(ClusterState actual, ClusterState expected) { assertThat(actual, notNullValue()); - final String reason = "\n--> actual ClusterState: " + actual.prettyPrint() + "\n" + - "--> expected ClusterState:" + expected.prettyPrint(); + final String reason = "\n--> actual ClusterState: " + actual + "\n" + + "--> 
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java
index eb8153c8354..e1d2a226a02 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/PublishClusterStateActionTests.java
@@ -41,8 +41,6 @@ import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.discovery.Discovery;
 import org.elasticsearch.discovery.DiscoverySettings;
-import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
-import org.elasticsearch.discovery.zen.PublishClusterStateAction;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.test.ESTestCase;
@@ -852,8 +850,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
 
     void assertSameState(ClusterState actual, ClusterState expected) {
         assertThat(actual, notNullValue());
-        final String reason = "\n--> actual ClusterState: " + actual.prettyPrint() + "\n" +
-            "--> expected ClusterState:" + expected.prettyPrint();
+        final String reason = "\n--> actual ClusterState: " + actual + "\n" +
+            "--> expected ClusterState:" + expected;
         assertThat("unequal UUIDs" + reason, actual.stateUUID(), equalTo(expected.stateUUID()));
         assertThat("unequal versions" + reason, actual.version(), equalTo(expected.version()));
     }
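Review note on the two recurring edits in the four test files above: assertMaster is now wrapped in assertBusy, because nodes apply a committed cluster state at slightly different times, and ClusterState.prettyPrint()/DiscoveryNodes.prettyPrint() calls are dropped in favor of plain references, which assumes toString() on those classes now carries the readable multi-line rendering. A minimal sketch of the combined idiom, not part of the patch (assertBusy is the ESTestCase helper that re-runs the lambda on AssertionError and rethrows the last failure once the timeout elapses):

    // Hedged sketch: retry until the cluster converges; the failure message
    // relies on ClusterState's implicit toString(), no prettyPrint() needed.
    assertBusy(() -> {
        ClusterState state = getNodeClusterState(node); // helper from the test class above
        assertThat("no master elected yet. state: " + state,
            state.nodes().getMasterNodeId(), notNullValue());
    }, 30, TimeUnit.SECONDS);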
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java
index 106612f22e0..4294bdd3dd4 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.discovery.zen;
 
+import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -33,10 +34,6 @@ import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.discovery.zen.ElectMasterService;
-import org.elasticsearch.discovery.zen.UnicastZenPing;
-import org.elasticsearch.discovery.zen.PingContextProvider;
-import org.elasticsearch.discovery.zen.ZenPing;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.VersionUtils;
@@ -47,6 +44,7 @@ import org.elasticsearch.transport.TransportConnectionListener;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.transport.TransportSettings;
 
+import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Collections;
@@ -62,7 +60,7 @@ import static org.hamcrest.Matchers.greaterThan;
 public class UnicastZenPingTests extends ESTestCase {
     private static final UnicastHostsProvider EMPTY_HOSTS_PROVIDER = Collections::emptyList;
 
-    public void testSimplePings() throws InterruptedException {
+    public void testSimplePings() throws IOException, InterruptedException {
         int startPort = 11000 + randomIntBetween(0, 1000);
         int endPort = startPort + 10;
         Settings settings = Settings.builder()
@@ -97,7 +95,7 @@
         Settings hostsSettingsMismatch = Settings.builder().put(hostsSettings).put(settingsMismatch).build();
         UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, handleA.transportService, EMPTY_HOSTS_PROVIDER);
-        zenPingA.setPingContextProvider(new PingContextProvider() {
+        zenPingA.start(new PingContextProvider() {
             @Override
             public DiscoveryNodes nodes() {
                 return DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A").build();
             }
@@ -108,10 +106,9 @@
                 return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();
             }
         });
-        zenPingA.start();
 
         UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, handleB.transportService, EMPTY_HOSTS_PROVIDER);
-        zenPingB.setPingContextProvider(new PingContextProvider() {
+        zenPingB.start(new PingContextProvider() {
             @Override
             public DiscoveryNodes nodes() {
                 return DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B").build();
             }
@@ -122,7 +119,6 @@
                 return state;
             }
         });
-        zenPingB.start();
 
         UnicastZenPing zenPingC = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleC.transportService, EMPTY_HOSTS_PROVIDER) {
             @Override
@@ -130,7 +126,7 @@
                 return versionD;
             }
         };
-        zenPingC.setPingContextProvider(new PingContextProvider() {
+        zenPingC.start(new PingContextProvider() {
             @Override
             public DiscoveryNodes nodes() {
                 return DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C").build();
             }
@@ -141,10 +137,9 @@
                 return state;
             }
         });
-        zenPingC.start();
 
         UnicastZenPing zenPingD = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleD.transportService, EMPTY_HOSTS_PROVIDER);
-        zenPingD.setPingContextProvider(new PingContextProvider() {
+        zenPingD.start(new PingContextProvider() {
             @Override
             public DiscoveryNodes nodes() {
                 return DiscoveryNodes.builder().add(handleD.node).localNodeId("UZP_D").build();
             }
@@ -155,7 +150,6 @@
                 return state;
             }
         });
-        zenPingD.start();
 
         try {
             logger.info("ping from UZP_A");
@@ -185,15 +179,12 @@
             assertThat(pingResponses.size(), equalTo(0));
             assertCounters(handleD, handleA, handleB, handleC, handleD);
         } finally {
-            zenPingA.close();
-            zenPingB.close();
-            zenPingC.close();
-            zenPingD.close();
-            handleA.transportService.close();
-            handleB.transportService.close();
-            handleC.transportService.close();
-            handleD.transportService.close();
-            terminate(threadPool);
+            try {
+                IOUtils.close(zenPingA, zenPingB, zenPingC, zenPingD,
+                    handleA.transportService, handleB.transportService, handleC.transportService, handleD.transportService);
+            } finally {
+                terminate(threadPool);
+            }
         }
     }
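Two patterns in the UnicastZenPingTests change deserve a callout: start(PingContextProvider) folds the old setPingContextProvider(...)/start() pair into a single call, and teardown now funnels every Closeable through Lucene's IOUtils.close, which attempts to close all arguments even when an earlier close() throws and rethrows the first failure afterwards. A minimal sketch of the teardown shape (names reused from the test above):

    // Hedged sketch: no resource is leaked if an earlier close() throws.
    try {
        IOUtils.close(zenPingA, zenPingB, handleA.transportService, handleB.transportService);
    } finally {
        terminate(threadPool); // always reclaim the test thread pool
    }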
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
index 8c13b5783d9..88cf23fe938 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
@@ -34,6 +34,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.discovery.Discovery;
 import org.elasticsearch.discovery.zen.PublishClusterStateActionTests.AssertingAckListener;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.discovery.MockZenPing;
 import org.elasticsearch.test.transport.MockTransportService;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -55,7 +56,9 @@ import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_M
 import static org.elasticsearch.discovery.zen.ZenDiscovery.shouldIgnoreOrRejectNewClusterState;
 import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
 import static org.elasticsearch.test.ClusterServiceUtils.setState;
+import static org.hamcrest.Matchers.arrayWithSize;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.emptyArray;
 import static org.hamcrest.Matchers.equalTo;
 
 public class ZenDiscoveryUnitTests extends ESTestCase {
@@ -182,7 +185,6 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
         toClose.add(otherZen);
         otherTransport.acceptIncomingRequests();
 
-        masterTransport.connectToNode(otherNode);
         otherTransport.connectToNode(masterNode);
 
@@ -213,10 +215,62 @@
         }
     }
 
+    public void testPendingCSQueueIsClearedWhenClusterStatePublished() throws Exception {
+        ThreadPool threadPool = new TestThreadPool(getClass().getName());
+        // randomly make minimum_master_nodes a value higher than we have nodes for, so it will force failure
+        int minMasterNodes = randomBoolean() ? 3 : 1;
+        Settings settings = Settings.builder()
+            .put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes)).build();
+
+        ArrayList<Closeable> toClose = new ArrayList<>();
+        try {
+            final MockTransportService masterTransport = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null);
+            masterTransport.start();
+            DiscoveryNode masterNode = new DiscoveryNode("master", masterTransport.boundAddress().publishAddress(), Version.CURRENT);
+            toClose.add(masterTransport);
+            masterTransport.setLocalNode(masterNode);
+            ClusterState state = ClusterStateCreationUtils.state(masterNode, null, masterNode);
+            // build the zen discovery and cluster service
+            ClusterService masterClusterService = createClusterService(threadPool, masterNode);
+            toClose.add(masterClusterService);
+            state = ClusterState.builder(masterClusterService.getClusterName()).nodes(state.nodes()).build();
+            setState(masterClusterService, state);
+            ZenDiscovery masterZen = buildZenDiscovery(settings, masterTransport, masterClusterService, threadPool);
+            toClose.add(masterZen);
+            masterTransport.acceptIncomingRequests();
+
+            // inject a pending cluster state
+            masterZen.pendingClusterStatesQueue().addPending(ClusterState.builder(new ClusterName("foreign")).build());
+
+            // a new cluster state with a new discovery node (we will test if the cluster state
+            // was updated by the presence of this node in NodesFaultDetection)
+            ClusterState newState = ClusterState.builder(masterClusterService.state()).incrementVersion().nodes(
+                DiscoveryNodes.builder(masterClusterService.state().nodes()).masterNodeId(masterNode.getId())
+            ).build();
+
+            try {
+                // publishing a new cluster state
+                ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent("testing", newState, state);
+                AssertingAckListener listener = new AssertingAckListener(newState.nodes().getSize() - 1);
+                masterZen.publish(clusterChangedEvent, listener);
+                listener.await(1, TimeUnit.HOURS);
+                // publish was a success, check that the queue was cleared
+                assertThat(masterZen.pendingClusterStates(), emptyArray());
+            } catch (Discovery.FailedToCommitClusterStateException e) {
+                // not successful, so the pending queue should stay
+                assertThat(masterZen.pendingClusterStates(), arrayWithSize(1));
+                assertThat(masterZen.pendingClusterStates()[0].getClusterName().value(), equalTo("foreign"));
+            }
+        } finally {
+            IOUtils.close(toClose);
+            terminate(threadPool);
+        }
+    }
+
     private ZenDiscovery buildZenDiscovery(Settings settings, TransportService service, ClusterService clusterService, ThreadPool threadPool) {
         ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
-        ZenPingService zenPingService = new ZenPingService(settings, Collections.emptySet());
-        ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, clusterService, clusterSettings, zenPingService);
+        ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, clusterService, clusterSettings, new MockZenPing(settings));
         zenDiscovery.start();
         return zenDiscovery;
     }
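The new test above deliberately randomizes discovery.zen.minimum_master_nodes between a satisfiable value (1) and an unsatisfiable one (3), so a single test body exercises both publish outcomes: a committed publish must drain the pending cluster-state queue, while a failed commit must leave the injected "foreign" state in place. Schematically (a condensed sketch reusing names from the test):

    try {
        masterZen.publish(clusterChangedEvent, listener);               // throws if the commit fails
        listener.await(1, TimeUnit.HOURS);
        assertThat(masterZen.pendingClusterStates(), emptyArray());     // success: queue cleared
    } catch (Discovery.FailedToCommitClusterStateException e) {
        assertThat(masterZen.pendingClusterStates(), arrayWithSize(1)); // failure: queue untouched
    }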
diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java
index 7bae67f5b2f..a335a42edb6 100644
--- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java
+++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java
@@ -291,13 +291,14 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
         assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
 
         // Node1 has the primary, now node2 has the replica
-        String node2 = internalCluster().startNode(nodeSettings);
+        internalCluster().startNode(nodeSettings);
         ensureGreen(IDX);
         client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
         flushAndRefresh(IDX);
 
         logger.info("--> stopping node1 [{}]", node1);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1));
+        ensureClusterSizeConsistency(); // wait for the new node to be elected and process the node leave
         ensureYellow(IDX);
 
         logger.info("--> performing query");
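Everything below in InternalEngineTests follows one mechanical migration: Engine.index() and Engine.delete() no longer mutate the operation and signal version conflicts by throwing; they return an Engine.IndexResult or Engine.DeleteResult carrying the version, the created flag, the translog location, and any per-document failure. A hedged before/after sketch of the consumer-side change:

    // before: results were read off the operation and conflicts thrown
    //   try { engine.index(op); fail(); } catch (VersionConflictEngineException e) { /* expected */ }
    // after (sketch): failures are data on the result object
    Engine.IndexResult result = engine.index(op);
    if (result.hasFailure()) {
        assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class));
    } else {
        assertThat(result.getVersion(), equalTo(1L));
    }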
diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index a77c402c74f..fcc7db73a65 100644
--- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -31,8 +31,11 @@ import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
 import org.apache.lucene.index.LiveIndexWriterConfig;
 import org.apache.lucene.index.LogByteSizeMergePolicy;
@@ -68,11 +71,9 @@ import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.VersionType;
-import org.elasticsearch.index.analysis.AnalysisRegistry;
 import org.elasticsearch.index.analysis.AnalyzerScope;
 import org.elasticsearch.index.analysis.IndexAnalyzers;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
@@ -106,6 +107,7 @@ import org.elasticsearch.indices.mapper.MapperRegistry;
 import org.elasticsearch.test.DummyShardLock;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.IndexSettingsModule;
+import org.elasticsearch.test.OldIndexUtils;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.hamcrest.MatcherAssert;
@@ -131,10 +133,12 @@ import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Supplier;
 
 import static java.util.Collections.emptyMap;
 import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY;
 import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA;
+import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.everyItem;
@@ -279,12 +283,21 @@ public class InternalEngineTests extends ESTestCase {
     }
 
     protected InternalEngine createEngine(Store store, Path translogPath) throws IOException {
-        return createEngine(defaultSettings, store, translogPath, newMergePolicy());
+        return createEngine(defaultSettings, store, translogPath, newMergePolicy(), null);
     }
 
     protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) throws IOException {
+        return createEngine(indexSettings, store, translogPath, mergePolicy, null);
+
+    }
+
+    protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, Supplier<IndexWriter> indexWriterSupplier) throws IOException {
         EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null);
-        InternalEngine internalEngine = new InternalEngine(config);
+        InternalEngine internalEngine = new InternalEngine(config) {
+            @Override
+            IndexWriter createWriter(boolean create) throws IOException {
+                return (indexWriterSupplier != null) ? indexWriterSupplier.get() : super.createWriter(create);
+            }
+        };
         if (config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) {
             internalEngine.recoverFromTranslog();
         }
@@ -335,11 +348,11 @@ public class InternalEngineTests extends ESTestCase {
         // create two docs and refresh
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
         Engine.Index first = new Engine.Index(newUid("1"), doc);
-        engine.index(first);
+        Engine.IndexResult firstResult = engine.index(first);
         ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null);
         Engine.Index second = new Engine.Index(newUid("2"), doc2);
-        engine.index(second);
-        assertThat(second.getTranslogLocation(), greaterThan(first.getTranslogLocation()));
+        Engine.IndexResult secondResult = engine.index(second);
+        assertThat(secondResult.getTranslogLocation(), greaterThan(firstResult.getTranslogLocation()));
         engine.refresh("test");
 
         segments = engine.segments(false);
@@ -629,7 +642,7 @@ public class InternalEngineTests extends ESTestCase {
                 operations.add(operation);
                 initialEngine.index(operation);
             } else {
-                final Engine.Delete operation = new Engine.Delete("test", "1", newUid("test#1"), i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false);
+                final Engine.Delete operation = new Engine.Delete("test", "1", newUid("test#1"), i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime());
                 operations.add(operation);
                 initialEngine.delete(operation);
             }
@@ -1040,93 +1053,82 @@ public class InternalEngineTests extends ESTestCase {
 
     public void testVersioningNewCreate() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED);
-        engine.index(create);
-        assertThat(create.version(), equalTo(1L));
+        Engine.IndexResult indexResult = engine.index(create);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
-        create = new Engine.Index(newUid("1"), doc, create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
-        replicaEngine.index(create);
-        assertThat(create.version(), equalTo(1L));
+        create = new Engine.Index(newUid("1"), doc, indexResult.getVersion(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
+        indexResult = replicaEngine.index(create);
+        assertThat(indexResult.getVersion(), equalTo(1L));
     }
 
     public void testVersioningNewIndex() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(1L));
+        Engine.IndexResult indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
-        index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
-        replicaEngine.index(index);
-        assertThat(index.version(), equalTo(1L));
+        index = new Engine.Index(newUid("1"), doc, indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
+        indexResult = replicaEngine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(1L));
     }
 
     public void testExternalVersioningNewIndex() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
-        engine.index(index);
-        assertThat(index.version(), equalTo(12L));
+        Engine.IndexResult indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(12L));
 
-        index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
-        replicaEngine.index(index);
-        assertThat(index.version(), equalTo(12L));
+        index = new Engine.Index(newUid("1"), doc, indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
        indexResult = replicaEngine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(12L));
     }
 
     public void testVersioningIndexConflict() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(1L));
+        Engine.IndexResult indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
         index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(2L));
+        indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(2L));
 
         index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, 0, -1, false);
-        try {
-            engine.index(index);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        indexResult = engine.index(index);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
 
         // future versions should not work as well
         index = new Engine.Index(newUid("1"), doc, 3L, VersionType.INTERNAL, PRIMARY, 0, -1, false);
-        try {
-            engine.index(index);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        indexResult = engine.index(index);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
    }
 
     public void testExternalVersioningIndexConflict() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
-        engine.index(index);
-        assertThat(index.version(), equalTo(12L));
+        Engine.IndexResult indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(12L));
 
         index = new Engine.Index(newUid("1"), doc, 14, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
-        engine.index(index);
-        assertThat(index.version(), equalTo(14L));
+        indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(14L));
 
         index = new Engine.Index(newUid("1"), doc, 13, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
-        try {
-            engine.index(index);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        indexResult = engine.index(index);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
     }
 
     public void testForceVersioningNotAllowedExceptForOlderIndices() throws Exception {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc, 42, VersionType.FORCE, PRIMARY, 0, -1, false);
 
-        try {
-            engine.index(index);
-            fail("should have failed due to using VersionType.FORCE");
-        } catch (IllegalArgumentException iae) {
-            assertThat(iae.getMessage(), containsString("version type [FORCE] may not be used for indices created after 6.0"));
-        }
+        Engine.IndexResult indexResult = engine.index(index);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(IllegalArgumentException.class));
+        assertThat(indexResult.getFailure().getMessage(), containsString("version type [FORCE] may not be used for indices created after 6.0"));
 
         IndexSettings oldIndexSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder()
             .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0_beta1)
@@ -1134,69 +1136,58 @@ public class InternalEngineTests extends ESTestCase {
         try (Store store = createStore();
              Engine engine = createEngine(oldIndexSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) {
             index = new Engine.Index(newUid("1"), doc, 84, VersionType.FORCE, PRIMARY, 0, -1, false);
-            try {
-                engine.index(index);
-                fail("should have failed due to using VersionType.FORCE");
-            } catch (IllegalArgumentException iae) {
-                assertThat(iae.getMessage(), containsString("version type [FORCE] may not be used for non-translog operations"));
-            }
+            Engine.IndexResult result = engine.index(index);
+            assertTrue(result.hasFailure());
+            assertThat(result.getFailure(), instanceOf(IllegalArgumentException.class));
+            assertThat(result.getFailure().getMessage(), containsString("version type [FORCE] may not be used for non-translog operations"));
 
             index = new Engine.Index(newUid("1"), doc, 84, VersionType.FORCE, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY, 0, -1, false);
-            engine.index(index);
-            assertThat(index.version(), equalTo(84L));
+            result = engine.index(index);
+            assertThat(result.getVersion(), equalTo(84L));
         }
     }
 
     public void testVersioningIndexConflictWithFlush() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(1L));
+        Engine.IndexResult indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
         index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(2L));
+        indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(2L));
 
         engine.flush();
 
         index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL, PRIMARY, 0, -1, false);
-        try {
-            engine.index(index);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        indexResult = engine.index(index);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
 
         // future versions should not work as well
         index = new Engine.Index(newUid("1"), doc, 3L, VersionType.INTERNAL, PRIMARY, 0, -1, false);
-        try {
-            engine.index(index);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        indexResult = engine.index(index);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
     }
 
     public void testExternalVersioningIndexConflictWithFlush() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
-        engine.index(index);
-        assertThat(index.version(), equalTo(12L));
+        Engine.IndexResult indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(12L));
 
         index = new Engine.Index(newUid("1"), doc, 14, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
-        engine.index(index);
-        assertThat(index.version(), equalTo(14L));
+        indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(14L));
 
         engine.flush();
 
         index = new Engine.Index(newUid("1"), doc, 13, VersionType.EXTERNAL, PRIMARY, 0, -1, false);
-        try {
-            engine.index(index);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        indexResult = engine.index(index);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
     }
 
     public void testForceMerge() throws IOException {
@@ -1297,254 +1288,202 @@ public class InternalEngineTests extends ESTestCase {
 
     public void testVersioningDeleteConflict() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(1L));
+        Engine.IndexResult indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
         index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(2L));
+        indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(2L));
 
-        Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"), 1L, VersionType.INTERNAL, PRIMARY, 0, false);
-        try {
-            engine.delete(delete);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"), 1L, VersionType.INTERNAL, PRIMARY, 0);
+        Engine.DeleteResult result = engine.delete(delete);
+        assertTrue(result.hasFailure());
+        assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class));
 
         // future versions should not work as well
-        delete = new Engine.Delete("test", "1", newUid("1"), 3L, VersionType.INTERNAL, PRIMARY, 0, false);
-        try {
-            engine.delete(delete);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        delete = new Engine.Delete("test", "1", newUid("1"), 3L, VersionType.INTERNAL, PRIMARY, 0);
+        result = engine.delete(delete);
+        assertTrue(result.hasFailure());
+        assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class));
 
         // now actually delete
-        delete = new Engine.Delete("test", "1", newUid("1"), 2L, VersionType.INTERNAL, PRIMARY, 0, false);
-        engine.delete(delete);
-        assertThat(delete.version(), equalTo(3L));
+        delete = new Engine.Delete("test", "1", newUid("1"), 2L, VersionType.INTERNAL, PRIMARY, 0);
+        result = engine.delete(delete);
+        assertThat(result.getVersion(), equalTo(3L));
 
         // now check if we can index to a delete doc with version
         index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL, PRIMARY, 0, -1, false);
-        try {
-            engine.index(index);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
-
-        // we shouldn't be able to create as well
-        Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
-        try {
-            engine.index(create);
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        indexResult = engine.index(index);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
     }
 
     public void testVersioningDeleteConflictWithFlush() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(1L));
+        Engine.IndexResult indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
         index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(2L));
+        indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(2L));
 
         engine.flush();
 
-        Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"), 1L, VersionType.INTERNAL, PRIMARY, 0, false);
-        try {
-            engine.delete(delete);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"), 1L, VersionType.INTERNAL, PRIMARY, 0);
+        Engine.DeleteResult deleteResult = engine.delete(delete);
+        assertTrue(deleteResult.hasFailure());
+        assertThat(deleteResult.getFailure(), instanceOf(VersionConflictEngineException.class));
 
         // future versions should not work as well
-        delete = new Engine.Delete("test", "1", newUid("1"), 3L, VersionType.INTERNAL, PRIMARY, 0, false);
-        try {
-            engine.delete(delete);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        delete = new Engine.Delete("test", "1", newUid("1"), 3L, VersionType.INTERNAL, PRIMARY, 0);
+        deleteResult = engine.delete(delete);
+        assertTrue(deleteResult.hasFailure());
+        assertThat(deleteResult.getFailure(), instanceOf(VersionConflictEngineException.class));
 
         engine.flush();
 
         // now actually delete
-        delete = new Engine.Delete("test", "1", newUid("1"), 2L, VersionType.INTERNAL, PRIMARY, 0, false);
-        engine.delete(delete);
-        assertThat(delete.version(), equalTo(3L));
+        delete = new Engine.Delete("test", "1", newUid("1"), 2L, VersionType.INTERNAL, PRIMARY, 0);
+        deleteResult = engine.delete(delete);
+        assertThat(deleteResult.getVersion(), equalTo(3L));
 
         engine.flush();
 
         // now check if we can index to a delete doc with version
         index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL, PRIMARY, 0, -1, false);
-        try {
-            engine.index(index);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
-
-        // we shouldn't be able to create as well
-        Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
-        try {
-            engine.index(create);
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        indexResult = engine.index(index);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
     }
 
     public void testVersioningCreateExistsException() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
-        engine.index(create);
-        assertThat(create.version(), equalTo(1L));
+        Engine.IndexResult indexResult = engine.index(create);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
         create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
-        try {
-            engine.index(create);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        indexResult = engine.index(create);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
    }
 
     public void testVersioningCreateExistsExceptionWithFlush() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
-        engine.index(create);
-        assertThat(create.version(), equalTo(1L));
+        Engine.IndexResult indexResult = engine.index(create);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
         engine.flush();
 
         create = new Engine.Index(newUid("1"), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
-        try {
-            engine.index(create);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        indexResult = engine.index(create);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
     }
 
     public void testVersioningReplicaConflict1() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(1L));
+        Engine.IndexResult indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
         index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(2L));
+        indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(2L));
 
         // apply the second index to the replica, should work fine
-        index = new Engine.Index(newUid("1"), doc, index.version(), VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
-        replicaEngine.index(index);
-        assertThat(index.version(), equalTo(2L));
+        index = new Engine.Index(newUid("1"), doc, indexResult.getVersion(), VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
+        indexResult = replicaEngine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(2L));
 
         // now, the old one should not work
         index = new Engine.Index(newUid("1"), doc, 1L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
-        try {
-            replicaEngine.index(index);
-            fail();
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        indexResult = replicaEngine.index(index);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
 
         // second version on replica should fail as well
-        try {
-            index = new Engine.Index(newUid("1"), doc, 2L
-                , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
-            replicaEngine.index(index);
-            assertThat(index.version(), equalTo(2L));
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        index = new Engine.Index(newUid("1"), doc, 2L
+            , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
+        indexResult = replicaEngine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(2L));
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
     }
 
     public void testVersioningReplicaConflict2() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(1L));
+        Engine.IndexResult indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
         // apply the first index to the replica, should work fine
         index = new Engine.Index(newUid("1"), doc, 1L
             , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
-        replicaEngine.index(index);
-        assertThat(index.version(), equalTo(1L));
+        indexResult = replicaEngine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
         // index it again
         index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertThat(index.version(), equalTo(2L));
+        indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(2L));
 
         // now delete it
         Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"));
-        engine.delete(delete);
-        assertThat(delete.version(), equalTo(3L));
+        Engine.DeleteResult deleteResult = engine.delete(delete);
+        assertThat(deleteResult.getVersion(), equalTo(3L));
 
         // apply the delete on the replica (skipping the second index)
         delete = new Engine.Delete("test", "1", newUid("1"), 3L
-            , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, false);
-        replicaEngine.delete(delete);
-        assertThat(delete.version(), equalTo(3L));
+            , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
+        deleteResult = replicaEngine.delete(delete);
+        assertThat(deleteResult.getVersion(), equalTo(3L));
 
         // second time delete with same version should fail
-        try {
-            delete = new Engine.Delete("test", "1", newUid("1"), 3L
-                , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, false);
-            replicaEngine.delete(delete);
-            fail("excepted VersionConflictEngineException to be thrown");
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        delete = new Engine.Delete("test", "1", newUid("1"), 3L
+            , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
+        deleteResult = replicaEngine.delete(delete);
+        assertTrue(deleteResult.hasFailure());
+        assertThat(deleteResult.getFailure(), instanceOf(VersionConflictEngineException.class));
 
         // now do the second index on the replica, it should fail
-        try {
-            index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
-            replicaEngine.index(index);
-            fail("excepted VersionConflictEngineException to be thrown");
-        } catch (VersionConflictEngineException e) {
-            // all is well
-        }
+        index = new Engine.Index(newUid("1"), doc, 2L, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false);
+        indexResult = replicaEngine.index(index);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
     }
 
     public void testBasicCreatedFlag() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertTrue(index.isCreated());
+        Engine.IndexResult indexResult = engine.index(index);
+        assertTrue(indexResult.isCreated());
 
         index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertFalse(index.isCreated());
+        indexResult = engine.index(index);
+        assertFalse(indexResult.isCreated());
 
         engine.delete(new Engine.Delete(null, "1", newUid("1")));
 
         index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertTrue(index.isCreated());
+        indexResult = engine.index(index);
+        assertTrue(indexResult.isCreated());
     }
 
     public void testCreatedFlagAfterFlush() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertTrue(index.isCreated());
+        Engine.IndexResult indexResult = engine.index(index);
+        assertTrue(indexResult.isCreated());
 
         engine.delete(new Engine.Delete(null, "1", newUid("1")));
 
         engine.flush();
 
         index = new Engine.Index(newUid("1"), doc);
-        engine.index(index);
-        assertTrue(index.isCreated());
+        indexResult = engine.index(index);
+        assertTrue(indexResult.isCreated());
     }
 
     private static class MockAppender extends AbstractAppender {
@@ -1647,7 +1586,7 @@ public class InternalEngineTests extends ESTestCase {
         engine.index(new Engine.Index(newUid("1"), doc, 1, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false));
 
         // Delete document we just added:
-        engine.delete(new Engine.Delete("test", "1", newUid("1"), 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false));
+        engine.delete(new Engine.Delete("test", "1", newUid("1"), 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
 
         // Get should not find the document
         Engine.GetResult getResult = engine.get(new Engine.Get(true, newUid("1")));
@@ -1661,31 +1600,27 @@ public class InternalEngineTests extends ESTestCase {
         }
 
         // Delete non-existent document
-        engine.delete(new Engine.Delete("test", "2", newUid("2"), 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false));
+        engine.delete(new Engine.Delete("test", "2", newUid("2"), 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
 
         // Get should not find the document (we never indexed uid=2):
         getResult = engine.get(new Engine.Get(true, newUid("2")));
         assertThat(getResult.exists(), equalTo(false));
 
         // Try to index uid=1 with a too-old version, should fail:
-        try {
-            engine.index(new Engine.Index(newUid("1"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false));
-            fail("did not hit expected exception");
-        } catch (VersionConflictEngineException vcee) {
-            // expected
-        }
+        Engine.Index index = new Engine.Index(newUid("1"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false);
+        Engine.IndexResult indexResult = engine.index(index);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
 
         // Get should still not find the document
         getResult = engine.get(new Engine.Get(true, newUid("1")));
         assertThat(getResult.exists(), equalTo(false));
 
         // Try to index uid=2 with a too-old version, should fail:
-        try {
-            engine.index(new Engine.Index(newUid("2"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false));
-            fail("did not hit expected exception");
-        } catch (VersionConflictEngineException vcee) {
-            // expected
-        }
+        Engine.Index index1 = new Engine.Index(newUid("2"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false);
+        indexResult = engine.index(index1);
+        assertTrue(indexResult.hasFailure());
+        assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
 
         // Get should not find the document
         getResult = engine.get(new Engine.Get(true, newUid("2")));
@@ -1781,8 +1716,8 @@ public class InternalEngineTests extends ESTestCase {
         for (int i = 0; i < numDocs; i++) {
             ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
             Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
-            engine.index(firstIndexRequest);
-            assertThat(firstIndexRequest.version(), equalTo(1L));
+            Engine.IndexResult indexResult = engine.index(firstIndexRequest);
+            assertThat(indexResult.getVersion(), equalTo(1L));
         }
         engine.refresh("test");
         try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
@@ -1831,8 +1766,8 @@ public class InternalEngineTests extends ESTestCase {
         for (int i = 0; i < numDocs; i++) {
             ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
             Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
-            engine.index(firstIndexRequest);
-            assertThat(firstIndexRequest.version(), equalTo(1L));
+            Engine.IndexResult indexResult = engine.index(firstIndexRequest);
+            assertThat(indexResult.getVersion(), equalTo(1L));
         }
         engine.refresh("test");
         try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
@@ -1877,17 +1812,18 @@ public class InternalEngineTests extends ESTestCase {
 
         Path[] list = filterExtraFSFiles(FileSystemUtils.files(unzipDataDir));
 
         if (list.length != 1) {
-            throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length + " " + Arrays.toString(list));
+            throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length
+                + " " + Arrays.toString(list));
         }
+
         // the bwc scripts packs the indices under this path
-        Path src = list[0].resolve("nodes/0/indices/" + indexName);
-        Path translog = list[0].resolve("nodes/0/indices/" + indexName).resolve("0").resolve("translog");
-        assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src));
+        Path src = OldIndexUtils.getIndexDir(logger, indexName, indexFile.toString(), list[0]);
+        Path translog = src.resolve("0").resolve("translog");
         assertTrue("[" + indexFile + "] missing translog dir: " + translog.toString(), Files.exists(translog));
         Path[] tlogFiles = filterExtraFSFiles(FileSystemUtils.files(translog));
         assertEquals(Arrays.toString(tlogFiles), tlogFiles.length, 2); // ckp & tlog
         Path tlogFile = tlogFiles[0].getFileName().toString().endsWith("tlog") ? tlogFiles[0] : tlogFiles[1];
-        final long size = Files.size(tlogFiles[0]);
+        final long size = Files.size(tlogFile);
         logger.debug("upgrading index {} file: {} size: {}", indexName, tlogFiles[0].getFileName(), size);
         Directory directory = newFSDirectory(src.resolve("0").resolve("index"));
         Store store = createStore(directory);
@@ -1921,8 +1857,8 @@ public class InternalEngineTests extends ESTestCase {
         for (int i = 0; i < numExtraDocs; i++) {
             ParsedDocument doc = testParsedDocument("extra" + Integer.toString(i), "extra" + Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
             Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
-            engine.index(firstIndexRequest);
-            assertThat(firstIndexRequest.version(), equalTo(1L));
+            Engine.IndexResult indexResult = engine.index(firstIndexRequest);
+            assertThat(indexResult.getVersion(), equalTo(1L));
         }
         engine.refresh("test");
         try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
@@ -1950,8 +1886,8 @@ public class InternalEngineTests extends ESTestCase {
         for (int i = 0; i < numDocs; i++) {
             ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
             Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
-            engine.index(firstIndexRequest);
-            assertThat(firstIndexRequest.version(), equalTo(1L));
+            Engine.IndexResult indexResult = engine.index(firstIndexRequest);
+            assertThat(indexResult.getVersion(), equalTo(1L));
         }
         engine.refresh("test");
         try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
@@ -1993,17 +1929,17 @@ public class InternalEngineTests extends ESTestCase {
             String uuidValue = "test#" + Integer.toString(randomId);
             ParsedDocument doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
             Engine.Index firstIndexRequest = new Engine.Index(newUid(uuidValue), doc, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false);
-            engine.index(firstIndexRequest);
-            assertThat(firstIndexRequest.version(), equalTo(1L));
+            Engine.IndexResult indexResult = engine.index(firstIndexRequest);
+            assertThat(indexResult.getVersion(), equalTo(1L));
             if (flush) {
                 engine.flush();
             }
             doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
             Engine.Index idxRequest = new Engine.Index(newUid(uuidValue), doc, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false);
-            engine.index(idxRequest);
+            Engine.IndexResult result = engine.index(idxRequest);
             engine.refresh("test");
-            assertThat(idxRequest.version(), equalTo(2L));
+            assertThat(result.getVersion(), equalTo(2L));
             try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
                 TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1);
                 assertThat(topDocs.totalHits, equalTo(numDocs + 1));
@@ -2032,7 +1968,7 @@ public class InternalEngineTests extends ESTestCase {
 
     public static class TranslogHandler extends TranslogRecoveryPerformer {
 
-        private final DocumentMapper docMapper;
+        private final MapperService mapperService;
         public Mapping mappingUpdate = null;
 
         public final AtomicInteger recoveredOps = new AtomicInteger(0);
@@ -2040,7 +1976,6 @@ public class InternalEngineTests extends ESTestCase {
         public TranslogHandler(String indexName, Logger logger) {
             super(new ShardId("test", "_na_", 0), null, logger);
             Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
-            RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("test");
             Index index = new Index(indexName, "_na_");
             IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings);
             IndexAnalyzers indexAnalyzers = null;
@@ -2048,14 +1983,14 @@ public class InternalEngineTests extends ESTestCase {
             indexAnalyzers = new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultAnalyzer, defaultAnalyzer, Collections.emptyMap());
             SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
             MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
-            MapperService mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry, () -> null);
-            DocumentMapper.Builder b = new DocumentMapper.Builder(rootBuilder, mapperService);
-            this.docMapper = b.build(mapperService);
+            mapperService = new MapperService(indexSettings, indexAnalyzers, similarityService, mapperRegistry, () -> null);
         }
 
         @Override
         protected DocumentMapperForType docMapper(String type) {
-            return new DocumentMapperForType(docMapper, mappingUpdate);
+            RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder(type);
+            DocumentMapper.Builder b = new DocumentMapper.Builder(rootBuilder, mapperService);
+            return new DocumentMapperForType(b.build(mapperService), mappingUpdate);
         }
 
         @Override
@@ -2069,8 +2004,8 @@ public class InternalEngineTests extends ESTestCase {
         for (int i = 0; i < numDocs; i++) {
             ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
             Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
-            engine.index(firstIndexRequest);
-            assertThat(firstIndexRequest.version(), equalTo(1L));
+            Engine.IndexResult index = engine.index(firstIndexRequest);
+            assertThat(index.getVersion(), equalTo(1L));
         }
         engine.refresh("test");
         try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
@@ -2212,13 +2147,78 @@ public class InternalEngineTests extends ESTestCase {
         }
     }
 
+    public void testCheckDocumentFailure() throws Exception {
+        ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+        Exception documentFailure = engine.checkIfDocumentFailureOrThrow(new Engine.Index(newUid("1"), doc), new IOException("simulated document failure"));
+        assertThat(documentFailure, instanceOf(IOException.class));
+        try {
+            engine.checkIfDocumentFailureOrThrow(new Engine.Index(newUid("1"), doc), new CorruptIndexException("simulated environment failure", ""));
+            fail("expected exception to be thrown");
+        } catch (Exception environmentException) {
+            assertThat(environmentException.getMessage(), containsString("simulated environment failure"));
+        }
+    }
+
+    private static class ThrowingIndexWriter extends IndexWriter {
+        private boolean throwDocumentFailure;
+
+        public ThrowingIndexWriter(Directory d, IndexWriterConfig conf) throws IOException {
+            super(d, conf);
+        }
+
+        @Override
+        public long addDocument(Iterable<? extends IndexableField> doc) throws IOException {
+            if (throwDocumentFailure) {
+                throw new IOException("simulated");
+            } else {
+                return super.addDocument(doc);
+            }
+        }
+
+        @Override
+        public long deleteDocuments(Term... terms) throws IOException {
+            if (throwDocumentFailure) {
+                throw new IOException("simulated");
+            } else {
+                return super.deleteDocuments(terms);
+            }
+        }
+
+        public void setThrowDocumentFailure(boolean throwDocumentFailure) {
+            this.throwDocumentFailure = throwDocumentFailure;
+        }
+    }
+
+    public void testHandleDocumentFailure() throws Exception {
+        try (Store store = createStore()) {
+            ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+            ThrowingIndexWriter throwingIndexWriter = new ThrowingIndexWriter(store.directory(), new IndexWriterConfig());
+            try (Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, () -> throwingIndexWriter)) {
+                // test document failure while indexing
+                throwingIndexWriter.setThrowDocumentFailure(true);
+                Engine.IndexResult indexResult = engine.index(randomAppendOnly(1, doc, false));
+                assertNotNull(indexResult.getFailure());
+
+                throwingIndexWriter.setThrowDocumentFailure(false);
+                indexResult = engine.index(randomAppendOnly(1, doc, false));
+                assertNull(indexResult.getFailure());
+
+                // test document failure while deleting
+                throwingIndexWriter.setThrowDocumentFailure(true);
+                Engine.DeleteResult deleteResult = engine.delete(new Engine.Delete("test", "", newUid("1")));
+                assertNotNull(deleteResult.getFailure());
+            }
+        }
+
+    }
+
     public void testDocStats() throws IOException {
         final int numDocs = randomIntBetween(2, 10); // at least 2 documents otherwise we don't see any deletes below
         for (int i = 0; i < numDocs; i++) {
             ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
             Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
-            engine.index(firstIndexRequest);
-            assertThat(firstIndexRequest.version(), equalTo(1L));
+            Engine.IndexResult indexResult = engine.index(firstIndexRequest);
+            assertThat(indexResult.getVersion(), equalTo(1L));
         }
         DocsStats docStats = engine.getDocStats();
         assertEquals(numDocs, docStats.getCount());
@@ -2227,8 +2227,8 @@ public class InternalEngineTests extends ESTestCase {
 
         ParsedDocument doc = testParsedDocument(Integer.toString(0), Integer.toString(0), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
         Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(0)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
-        engine.index(firstIndexRequest);
-        assertThat(firstIndexRequest.version(), equalTo(2L));
+        Engine.IndexResult index = engine.index(firstIndexRequest);
+        assertThat(index.getVersion(), equalTo(2L));
         engine.flush(); // flush - buffered deletes are not counted
         docStats = engine.getDocStats();
         assertEquals(1, docStats.getDeleted());
@@ -2244,25 +2244,25 @@ public class InternalEngineTests extends ESTestCase {
         Engine.Index operation = randomAppendOnly(1, doc, false);
         Engine.Index retry = randomAppendOnly(1, doc, true);
         if (randomBoolean()) {
-            engine.index(operation);
+            Engine.IndexResult indexResult = engine.index(operation);
             assertFalse(engine.indexWriterHasDeletions());
             assertEquals(0, engine.getNumVersionLookups());
-            assertNotNull(operation.getTranslogLocation());
-            engine.index(retry);
+            assertNotNull(indexResult.getTranslogLocation());
+            Engine.IndexResult retryResult = engine.index(retry);
             assertTrue(engine.indexWriterHasDeletions());
             assertEquals(0, engine.getNumVersionLookups());
-            assertNotNull(retry.getTranslogLocation());
-            assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) > 0);
+            assertNotNull(retryResult.getTranslogLocation());
+            assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0);
         } else {
-            engine.index(retry);
+            Engine.IndexResult retryResult = engine.index(retry);
             assertTrue(engine.indexWriterHasDeletions());
             assertEquals(0, engine.getNumVersionLookups());
-            assertNotNull(retry.getTranslogLocation());
-            engine.index(operation);
+            assertNotNull(retryResult.getTranslogLocation());
+            Engine.IndexResult indexResult = engine.index(operation);
             assertTrue(engine.indexWriterHasDeletions());
             assertEquals(0, engine.getNumVersionLookups());
-            assertNotNull(retry.getTranslogLocation());
-            assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) < 0);
+            assertNotNull(retryResult.getTranslogLocation());
+            assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0);
         }
 
         engine.refresh("test");
@@ -2273,17 +2273,17 @@ public class InternalEngineTests extends ESTestCase {
         operation = randomAppendOnly(1, doc, false);
         retry = randomAppendOnly(1, doc, true);
         if (randomBoolean()) {
-            engine.index(operation);
-            assertNotNull(operation.getTranslogLocation());
-            engine.index(retry);
-            assertNotNull(retry.getTranslogLocation());
-            assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) > 0);
+            Engine.IndexResult indexResult = engine.index(operation);
+            assertNotNull(indexResult.getTranslogLocation());
+            Engine.IndexResult retryResult = engine.index(retry);
+            assertNotNull(retryResult.getTranslogLocation());
+            assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0);
         } else {
-            engine.index(retry);
-            assertNotNull(retry.getTranslogLocation());
-            engine.index(operation);
-            assertNotNull(retry.getTranslogLocation());
-            assertTrue(retry.getTranslogLocation().compareTo(operation.getTranslogLocation()) < 0);
+            Engine.IndexResult retryResult = engine.index(retry);
+            assertNotNull(retryResult.getTranslogLocation());
+            Engine.IndexResult indexResult = engine.index(operation);
+            assertNotNull(retryResult.getTranslogLocation());
+            assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0);
        }
 
         engine.refresh("test");
@@ -2301,25 +2301,26 @@ public class InternalEngineTests extends ESTestCase {
         long autoGeneratedIdTimestamp = 0;
 
         Engine.Index index = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
-        engine.index(index);
-        assertThat(index.version(), equalTo(1L));
+        Engine.IndexResult indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
-        index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
-        replicaEngine.index(index);
-        assertThat(index.version(), equalTo(1L));
+        index = new Engine.Index(newUid("1"), doc, indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+        indexResult = replicaEngine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(1L));
 
         isRetry = true;
         index = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
-        engine.index(index);
-        assertThat(index.version(), equalTo(1L));
+        indexResult = engine.index(index);
+        assertThat(indexResult.getVersion(), equalTo(1L));
         engine.refresh("test");
         try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
             TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
             assertEquals(1, topDocs.totalHits);
         }
 
-        index = new Engine.Index(newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
-        replicaEngine.index(index);
+        index = new Engine.Index(newUid("1"), doc, indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+        indexResult = replicaEngine.index(index);
+        assertThat(indexResult.hasFailure(), equalTo(false));
         replicaEngine.refresh("test");
         try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) {
             TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
@@ -2335,24 +2336,24 @@ public class InternalEngineTests extends ESTestCase {
 
         Engine.Index firstIndexRequest = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
-        engine.index(firstIndexRequest);
-        assertThat(firstIndexRequest.version(), equalTo(1L));
+        Engine.IndexResult result = engine.index(firstIndexRequest);
+        assertThat(result.getVersion(), equalTo(1L));
 
-        Engine.Index firstIndexRequestReplica = new Engine.Index(newUid("1"), doc, firstIndexRequest.version(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
-        replicaEngine.index(firstIndexRequestReplica);
-        assertThat(firstIndexRequestReplica.version(), equalTo(1L));
+        Engine.Index firstIndexRequestReplica = new Engine.Index(newUid("1"), doc, result.getVersion(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+        Engine.IndexResult indexReplicaResult = replicaEngine.index(firstIndexRequestReplica);
+        assertThat(indexReplicaResult.getVersion(), equalTo(1L));
 
         isRetry = false;
         Engine.Index secondIndexRequest = new Engine.Index(newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
-        engine.index(secondIndexRequest);
-        assertTrue(secondIndexRequest.isCreated());
+        Engine.IndexResult indexResult = engine.index(secondIndexRequest);
+        assertTrue(indexResult.isCreated());
         engine.refresh("test");
         try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
             TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
             assertEquals(1, topDocs.totalHits);
         }
 
-        Engine.Index secondIndexRequestReplica = new Engine.Index(newUid("1"), doc, firstIndexRequest.version(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+        Engine.Index secondIndexRequestReplica = new Engine.Index(newUid("1"), doc, result.getVersion(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
         replicaEngine.index(secondIndexRequestReplica);
         replicaEngine.refresh("test");
         try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) {
replicaEngine.acquireSearcher("test")) { diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index 6dea774f258..9e5b8107965 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -988,8 +988,8 @@ public class ShadowEngineTests extends ESTestCase { for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); - primaryEngine.index(firstIndexRequest); - assertThat(firstIndexRequest.version(), equalTo(1L)); + Engine.IndexResult indexResult = primaryEngine.index(firstIndexRequest); + assertThat(indexResult.getVersion(), equalTo(1L)); } DocsStats docStats = primaryEngine.getDocStats(); assertEquals(numDocs, docStats.getCount()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/CustomBoostMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/CustomBoostMappingTests.java index 9bc87e874f9..391f987e714 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/CustomBoostMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/CustomBoostMappingTests.java @@ -97,7 +97,7 @@ public class CustomBoostMappingTests extends ESSingleNodeTestCase { .startObject("date_field").field("type", "date").field("boost", 9.0f).endObject() .endObject().endObject().endObject().string(); IndexService indexService = createIndex("test", BW_SETTINGS); - QueryShardContext context = indexService.newQueryShardContext(); + QueryShardContext context = indexService.newQueryShardContext(0, null, () -> 0L); DocumentMapper mapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); DocumentFieldMappers fieldMappers = mapper.mappers(); assertThat(fieldMappers.getMapper("s_field").fieldType().termQuery("0", context), instanceOf(TermQuery.class)); @@ -150,7 +150,7 @@ public class CustomBoostMappingTests extends ESSingleNodeTestCase { .startObject("date_field").field("type", "date").field("boost", 9.0f).endObject() .endObject().endObject().endObject().string(); IndexService indexService = createIndex("text"); - QueryShardContext context = indexService.newQueryShardContext(); + QueryShardContext context = indexService.newQueryShardContext(0, null, () -> 0L); DocumentMapper mapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); DocumentFieldMappers fieldMappers = mapper.mappers(); assertThat(fieldMappers.getMapper("s_field").fieldType().termQuery("0", context), instanceOf(BoostQuery.class)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java index b434b41242f..af217030f2c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java @@ -45,7 +45,7 @@ public class DoubleIndexingDocTests extends ESSingleNodeTestCase { IndexService index = createIndex("test"); client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping).get(); 
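The mapper tests in this change stop using the zero-argument `newQueryShardContext()` and instead pass an explicit shard id, reader, and a `LongSupplier` for "now" (`newQueryShardContext(0, null, () -> 0L)`), so date-math resolution is deterministic, and tests that must never resolve "now" can pass a supplier that throws. A minimal sketch of that idiom under invented names (this is not Elasticsearch's actual `QueryShardContext` API):

```java
import java.util.function.LongSupplier;

// Sketch only: illustrates the nowInMillis supplier threaded through the
// shard context in the changes above; these class names are invented.
final class NowContextSketch {
    private final LongSupplier nowInMillis;

    NowContextSketch(LongSupplier nowInMillis) {
        this.nowInMillis = nowInMillis;
    }

    long nowInMillis() {
        return nowInMillis.getAsLong();
    }

    public static void main(String[] args) {
        // Pin "now" to a fixed instant so date-math assertions are stable.
        NowContextSketch fixed = new NowContextSketch(() -> 0L);
        if (fixed.nowInMillis() != 0L) {
            throw new AssertionError("expected a pinned clock");
        }

        // Fail fast if a code path under test unexpectedly resolves "now".
        NowContextSketch strict = new NowContextSketch(() -> {
            throw new UnsupportedOperationException("this test must not resolve now");
        });
        try {
            strict.nowInMillis();
            throw new AssertionError("expected UnsupportedOperationException");
        } catch (UnsupportedOperationException expected) {
            // expected: the supplier should never be invoked here
        }
    }
}
```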
DocumentMapper mapper = index.mapperService().documentMapper("type"); - QueryShardContext context = index.newQueryShardContext(); + QueryShardContext context = index.newQueryShardContext(0, null, () -> 0L); ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java index 44c973748d0..72f9d09808f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -39,6 +40,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.function.Supplier; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.is; @@ -59,8 +61,11 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase { Collections.singletonMap(ExternalMapperPlugin.EXTERNAL, new ExternalMapper.TypeParser(ExternalMapperPlugin.EXTERNAL, "foo")), Collections.singletonMap(ExternalMetadataMapper.CONTENT_TYPE, new ExternalMetadataMapper.TypeParser())); + Supplier queryShardContext = () -> { + return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }); + }; DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); + indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, queryShardContext); DocumentMapper documentMapper = parser.parse("type", new CompressedXContent( XContentFactory.jsonBuilder().startObject().startObject("type") .startObject(ExternalMetadataMapper.CONTENT_TYPE) @@ -108,8 +113,11 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase { mapperParsers.put(KeywordFieldMapper.CONTENT_TYPE, new KeywordFieldMapper.TypeParser()); MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, Collections.emptyMap()); + Supplier queryShardContext = () -> { + return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }); + }; DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); + indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, queryShardContext); DocumentMapper documentMapper = parser.parse("type", new CompressedXContent( XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") @@ -178,8 +186,11 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase { mapperParsers.put(TextFieldMapper.CONTENT_TYPE, new TextFieldMapper.TypeParser()); MapperRegistry mapperRegistry = new MapperRegistry(mapperParsers, 
Collections.emptyMap()); + Supplier queryShardContext = () -> { + return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }); + }; DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(), - indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); + indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, queryShardContext); DocumentMapper documentMapper = parser.parse("type", new CompressedXContent( XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties") diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java index 544764a9b53..5bc70350b13 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -38,6 +39,7 @@ import java.util.List; import java.util.Map; import java.util.SortedSet; import java.util.TreeSet; +import java.util.function.Supplier; public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { @@ -231,9 +233,12 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { Collections.singletonMap("_dummy", new DummyMetadataFieldMapper.TypeParser()) ); final MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); - MapperService mapperService = new MapperService(indexService.getIndexSettings(), indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); + Supplier queryShardContext = () -> { + return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }); + }; + MapperService mapperService = new MapperService(indexService.getIndexSettings(), indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, queryShardContext); DocumentMapperParser parser = new DocumentMapperParser(indexService.getIndexSettings(), mapperService, - indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext); + indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, queryShardContext); String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); ParsedDocument parsedDocument = mapper.parse("index", "type", "id", new BytesArray("{}")); diff --git a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java index 720f527acd1..c5c77e027ac 100644 --- a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java @@ -348,9 +348,14 @@ 
public class BoolQueryBuilderTests extends AbstractQueryTestCase parseQuery(query, ParseFieldMatcher.STRICT)); assertThat(ex.getMessage(), startsWith("query malformed, empty clause found at")); + checkWarningHeaders("query malformed, empty clause found at [1:27]"); } /** diff --git a/core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java index 96e6959600b..a4658fbbf23 100644 --- a/core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java @@ -30,9 +30,9 @@ import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; import java.util.Optional; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.CoreMatchers.startsWith;; +import static org.hamcrest.CoreMatchers.nullValue; public class BoostingQueryBuilderTests extends AbstractQueryTestCase { @@ -110,22 +110,27 @@ public class BoostingQueryBuilderTests extends AbstractQueryTestCase innerQueryBuilder = context.parseInnerQueryBuilder(); assertTrue(innerQueryBuilder.isPresent() == false); + checkWarningHeaders("query malformed, empty clause found at [1:36]"); + query = - "{ \"boosting\" : {" + - " \"positive\" : { \"match_all\" : {} }, " + - " \"negative\" : { }, " + - " \"negative_boost\" : 23.0" + - " }" + + "{ \"boosting\" : {\n" + + " \"positive\" : { \"match_all\" : {} },\n" + + " \"negative\" : { },\n" + + " \"negative_boost\" : 23.0\n" + + " }\n" + "}"; parser = XContentFactory.xContent(query).createParser(query); context = createParseContext(parser, ParseFieldMatcher.EMPTY); innerQueryBuilder = context.parseInnerQueryBuilder(); assertTrue(innerQueryBuilder.isPresent() == false); + checkWarningHeaders("query malformed, empty clause found at [3:20]"); + parser = XContentFactory.xContent(query).createParser(query); QueryParseContext otherContext = createParseContext(parser, ParseFieldMatcher.STRICT); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> otherContext.parseInnerQueryBuilder()); - assertThat(ex.getMessage(), startsWith("query malformed, empty clause found at")); + assertThat(ex.getMessage(), equalTo("query malformed, empty clause found at [3:20]")); + checkWarningHeaders("query malformed, empty clause found at [3:20]"); } public void testRewrite() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java index d1ac2c96192..7ab852bc3f6 100644 --- a/core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/ConstantScoreQueryBuilderTests.java @@ -31,9 +31,9 @@ import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; import java.util.Optional; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.Matchers.containsString; public class ConstantScoreQueryBuilderTests extends AbstractQueryTestCase { @@ -133,11 +133,13 @@ public class ConstantScoreQueryBuilderTests extends AbstractQueryTestCase innerQueryBuilder = 
context.parseInnerQueryBuilder(); assertTrue(innerQueryBuilder.isPresent() == false); + checkWarningHeaders("query malformed, empty clause found at [1:40]"); parser = XContentFactory.xContent(query).createParser(query); QueryParseContext otherContext = createParseContext(parser, ParseFieldMatcher.STRICT); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> otherContext.parseInnerQueryBuilder()); - assertThat(ex.getMessage(), startsWith("query malformed, empty clause found at")); + assertThat(ex.getMessage(), equalTo("query malformed, empty clause found at [1:40]")); + checkWarningHeaders("query malformed, empty clause found at [1:40]"); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java index ec542d7ccb1..114109736ea 100644 --- a/core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java @@ -99,6 +99,7 @@ public class DisMaxQueryBuilderTests extends AbstractQueryTestCase { + private boolean testSkipped = false; + + /** + * All tests create deprecation warnings when a new FuzzyQueryBuilder is created. Instead of having to check them once + * in every single test, this is done here after each test is run + */ + @After + void checkWarningHeaders() throws IOException { + // only check that warning headers got created for tests that satisfied certain assumptions and were thus not skipped + if (testSkipped == false) { + checkWarningHeaders("fuzzy query is deprecated. Instead use the [match] query with fuzziness parameter"); + } + } + @Override protected FuzzyQueryBuilder doCreateTestQueryBuilder() { FuzzyQueryBuilder query = new FuzzyQueryBuilder(STRING_FIELD_NAME, getRandomValueForFieldName(STRING_FIELD_NAME)); @@ -98,7 +114,13 @@ public class FuzzyQueryBuilderTests extends AbstractQueryTestCase 0); + try { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + } catch (AssumptionViolatedException e) { + // we need to know that this test was skipped in @After checkWarningHeaders(), because no warnings will be generated + testSkipped = true; + throw e; + } String query = "{\n" + " \"fuzzy\":{\n" + " \"" + STRING_FIELD_NAME + "\":{\n" + @@ -121,7 +143,13 @@ public class FuzzyQueryBuilderTests extends AbstractQueryTestCase 0); + try { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + } catch (AssumptionViolatedException e) { + // we need to know that this test was skipped in @After checkWarningHeaders(), because no warnings will be generated + testSkipped = true; + throw e; + } String query = "{\n" + " \"fuzzy\":{\n" + " \"" + INT_FIELD_NAME + "\":{\n" + @@ -157,7 +185,16 @@ public class FuzzyQueryBuilderTests extends AbstractQueryTestCase parseQuery(json)); + ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json2)); assertEquals("[fuzzy] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); String shortJson = "{\n" + diff --git a/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java index e491f64457d..aa0b82873fa 100644 --- a/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -58,7 +59,6 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.startsWith; public class HasChildQueryBuilderTests extends AbstractQueryTestCase { protected static final String PARENT_TYPE = "parent"; @@ -241,7 +241,8 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase otherContext.parseInnerQueryBuilder()); - assertThat(ex.getMessage(), startsWith("query malformed, empty clause found at")); + assertThat(ex.getMessage(), equalTo("query malformed, empty clause found at [3:17]")); + checkWarningHeaders("query malformed, empty clause found at [3:17]"); } public void testToQueryInnerQueryType() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/HasParentQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/HasParentQueryBuilderTests.java index cdc2be67d96..eb740835054 100644 --- a/core/src/test/java/org/elasticsearch/index/query/HasParentQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/HasParentQueryBuilderTests.java @@ -45,7 +45,6 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.startsWith; public class HasParentQueryBuilderTests extends AbstractQueryTestCase { protected static final String PARENT_TYPE = "parent"; @@ -157,6 +156,7 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase innerQueryBuilder = context.parseInnerQueryBuilder(); assertTrue(innerQueryBuilder.isPresent() == false); + checkWarningHeaders("query malformed, empty clause found at [3:17]"); + parser = XContentFactory.xContent(query).createParser(query); QueryParseContext otherContext = createParseContext(parser, ParseFieldMatcher.STRICT); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> otherContext.parseInnerQueryBuilder()); - assertThat(ex.getMessage(), startsWith("query malformed, empty clause found at")); + assertThat(ex.getMessage(), equalTo("query malformed, empty clause found at [3:17]")); + checkWarningHeaders("query malformed, empty clause found at [3:17]"); } public void testIgnoreUnmapped() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java index f21df5bf759..2ad557dcc76 100644 --- a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java @@ -140,6 +140,7 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(contentString)); assertEquals("Deprecated field [_type] used, expected [type] instead", e.getMessage()); + checkWarningHeaders("Deprecated field [_type] used, expected [type] instead"); 
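The `checkWarningHeaders(...)` assertions added throughout these query builder tests verify that deprecated or lenient parsing leaves a warning behind even when the parse ultimately throws; the real plumbing goes through `DeprecationLogger` and the thread context, as shown in the QueryParseContextTests changes further down. A rough sketch of the capture-and-assert pattern, with illustrative names only (the real tests do not use a plain thread-local):

```java
import java.util.ArrayList;
import java.util.List;

// Sketch only: a thread-local stand-in for the response-header channel that
// deprecation warnings are written to in the tests above.
final class WarningSketch {
    private static final ThreadLocal<List<String>> WARNINGS =
            ThreadLocal.withInitial(ArrayList::new);

    static void deprecated(String message) {
        // Recorded on a side channel, so it survives even if parsing then throws.
        WARNINGS.get().add(message);
    }

    static void checkWarningHeaders(String expected) {
        List<String> seen = WARNINGS.get();
        if (seen.contains(expected) == false) {
            throw new AssertionError("expected warning [" + expected + "] but saw " + seen);
        }
        seen.clear(); // each test starts from a clean slate
    }

    public static void main(String[] args) {
        deprecated("query malformed, empty clause found at [3:17]");
        checkWarningHeaders("query malformed, empty clause found at [3:17]");
    }
}
```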
//array of types can also be called type rather than types final String contentString2 = "{\n" + @@ -153,5 +154,6 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase e = expectThrows(IllegalArgumentException.class, () -> parseQuery(contentString2)); assertEquals("Deprecated field [types] used, expected [type] instead", e.getMessage()); + checkWarningHeaders("Deprecated field [types] used, expected [type] instead"); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/IndicesQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/IndicesQueryBuilderTests.java index 00bd7c4004f..3b31d17d9ba 100644 --- a/core/src/test/java/org/elasticsearch/index/query/IndicesQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/IndicesQueryBuilderTests.java @@ -22,11 +22,21 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; +import org.junit.After; import java.io.IOException; public class IndicesQueryBuilderTests extends AbstractQueryTestCase { + /** + * All tests create deprecation warnings when a new {@link IndicesQueryBuilder} is created. + * Instead of having to check them once in every single test, this is done here after each test is run + */ + @After + void checkWarningHeaders() throws IOException { + checkWarningHeaders("indices query is deprecated. Instead search on the '_index' field"); + } + @Override protected IndicesQueryBuilder doCreateTestQueryBuilder() { String[] indices; diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 571238e43a1..a4e202e2304 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -313,6 +313,9 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase parseQuery(json, ParseFieldMatcher.STRICT)); assertThat(e.getMessage(), @@ -346,6 +349,9 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase parseQuery(json, ParseFieldMatcher.STRICT)); assertThat(e.getMessage(), @@ -372,6 +378,8 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase parseQuery(json, ParseFieldMatcher.STRICT)); diff --git a/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java index aa9462bb3e6..c0900de4de1 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -301,6 +301,11 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase parseQuery(deprecatedJson)); assertEquals("Deprecated field [mlt] used, expected [more_like_this] instead", e.getMessage()); + + checkWarningHeaders("Deprecated field [mlt] used, expected [more_like_this] instead"); } } diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java index 5568d2fa5a7..aa8541ab956 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryParseContextTests.java @@ -21,20 +21,27 @@ package 
org.elasticsearch.index.query; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; import org.junit.BeforeClass; import java.io.IOException; +import java.util.List; import java.util.Optional; import static java.util.Collections.emptyList; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; public class QueryParseContextTests extends ESTestCase { @@ -45,6 +52,20 @@ public class QueryParseContextTests extends ESTestCase { indicesQueriesRegistry = new SearchModule(Settings.EMPTY, false, emptyList()).getQueryParserRegistry(); } + private ThreadContext threadContext; + + @Before + public void beforeTest() throws IOException { + this.threadContext = new ThreadContext(Settings.EMPTY); + DeprecationLogger.setThreadContext(threadContext); + } + + @After + public void teardown() throws IOException { + DeprecationLogger.removeThreadContext(this.threadContext); + this.threadContext.close(); + } + public void testParseTopLevelBuilder() throws IOException { QueryBuilder query = new MatchQueryBuilder("foo", "bar"); String requestBody = "{ \"query\" : " + query.toString() + "}"; @@ -89,6 +110,9 @@ public class QueryParseContextTests extends ESTestCase { QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.EMPTY); Optional emptyQuery = context.parseInnerQueryBuilder(); assertFalse(emptyQuery.isPresent()); + final List warnings = threadContext.getResponseHeaders().get(DeprecationLogger.DEPRECATION_HEADER); + assertThat(warnings, hasSize(1)); + assertThat(warnings, hasItem(equalTo("query malformed, empty clause found at [1:2]"))); } } @@ -107,6 +131,9 @@ public class QueryParseContextTests extends ESTestCase { QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.STRICT); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> context.parseInnerQueryBuilder()); assertEquals("query malformed, empty clause found at [1:2]", exception.getMessage()); + final List warnings = threadContext.getResponseHeaders().get(DeprecationLogger.DEPRECATION_HEADER); + assertThat(warnings, hasSize(1)); + assertThat(warnings, hasItem(equalTo("query malformed, empty clause found at [1:2]"))); } source = "{ \"foo\" : \"bar\" }"; @@ -122,6 +149,9 @@ public class QueryParseContextTests extends ESTestCase { ParsingException exception = expectThrows(ParsingException.class, () -> context.parseInnerQueryBuilder()); assertEquals("no [query] registered for [foo]", exception.getMessage()); } + final List warnings = threadContext.getResponseHeaders().get(DeprecationLogger.DEPRECATION_HEADER); + assertThat(warnings, hasSize(1)); + assertThat(warnings, hasItem(equalTo("query malformed, empty clause found at [1:2]"))); } } diff --git 
a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 353d5704d4d..d89cb6702e0 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -36,6 +36,9 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.WildcardQuery; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; import org.elasticsearch.common.lucene.all.AllTermQuery; import org.elasticsearch.common.unit.Fuzziness; @@ -45,6 +48,7 @@ import org.hamcrest.Matchers; import org.joda.time.DateTimeZone; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -103,9 +107,6 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase 0); + // splitOnWhitespace=false + { + QueryStringQueryBuilder queryBuilder = + new QueryStringQueryBuilder("foo bar") + .field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2) + .splitOnWhitespace(false); + Query query = queryBuilder.toQuery(createShardContext()); + BooleanQuery bq1 = + new BooleanQuery.Builder() + .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "foo")), BooleanClause.Occur.SHOULD)) + .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), BooleanClause.Occur.SHOULD)) + .build(); + List disjuncts = new ArrayList<>(); + disjuncts.add(bq1); + disjuncts.add(new TermQuery(new Term(STRING_FIELD_NAME_2, "foo bar"))); + DisjunctionMaxQuery expectedQuery = new DisjunctionMaxQuery(disjuncts, 0.0f); + assertThat(query, equalTo(expectedQuery)); + } + + { + QueryStringQueryBuilder queryBuilder = + new QueryStringQueryBuilder("mapped_string:other foo bar") + .field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2) + .splitOnWhitespace(false); + Query query = queryBuilder.toQuery(createShardContext()); + BooleanQuery bq1 = + new BooleanQuery.Builder() + .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "foo")), BooleanClause.Occur.SHOULD)) + .add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), BooleanClause.Occur.SHOULD)) + .build(); + List disjuncts = new ArrayList<>(); + disjuncts.add(bq1); + disjuncts.add(new TermQuery(new Term(STRING_FIELD_NAME_2, "foo bar"))); + DisjunctionMaxQuery disjunctionMaxQuery = new DisjunctionMaxQuery(disjuncts, 0.0f); + BooleanQuery expectedQuery = + new BooleanQuery.Builder() + .add(disjunctionMaxQuery, BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "other")), BooleanClause.Occur.SHOULD) + .build(); + assertThat(query, equalTo(expectedQuery)); + } + + { + QueryStringQueryBuilder queryBuilder = + new QueryStringQueryBuilder("foo OR bar") + .field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2) + .splitOnWhitespace(false); + Query query = queryBuilder.toQuery(createShardContext()); + + List disjuncts1 = new ArrayList<>(); + disjuncts1.add(new TermQuery(new Term(STRING_FIELD_NAME, "foo"))); + disjuncts1.add(new TermQuery(new Term(STRING_FIELD_NAME_2, "foo"))); + DisjunctionMaxQuery maxQuery1 = new DisjunctionMaxQuery(disjuncts1, 0.0f); + + List disjuncts2 = new ArrayList<>(); + disjuncts2.add(new TermQuery(new 
Term(STRING_FIELD_NAME, "bar"))); + disjuncts2.add(new TermQuery(new Term(STRING_FIELD_NAME_2, "bar"))); + DisjunctionMaxQuery maxQuery2 = new DisjunctionMaxQuery(disjuncts2, 0.0f); + + BooleanQuery expectedQuery = + new BooleanQuery.Builder() + .add(new BooleanClause(maxQuery1, BooleanClause.Occur.SHOULD)) + .add(new BooleanClause(maxQuery2, BooleanClause.Occur.SHOULD)) + .build(); + assertThat(query, equalTo(expectedQuery)); + } + + // split_on_whitespace=false breaks range query with simple syntax + { + // throws an exception when lenient is set to false + QueryStringQueryBuilder queryBuilder = + new QueryStringQueryBuilder(">10 foo") + .field(INT_FIELD_NAME) + .splitOnWhitespace(false); + IllegalArgumentException exc = + expectThrows(IllegalArgumentException.class, () -> queryBuilder.toQuery(createShardContext())); + assertThat(exc.getMessage(), equalTo("For input string: \"10 foo\"")); + } + + { + // returns an empty boolean query when lenient is set to true + QueryStringQueryBuilder queryBuilder = + new QueryStringQueryBuilder(">10 foo") + .field(INT_FIELD_NAME) + .splitOnWhitespace(false) + .lenient(true); + Query query = queryBuilder.toQuery(createShardContext()); + BooleanQuery bq = new BooleanQuery.Builder().build(); + assertThat(bq, equalTo(query)); + } + + // splitOnWhitespace=true + { + QueryStringQueryBuilder queryBuilder = + new QueryStringQueryBuilder("foo bar") + .field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2) + .splitOnWhitespace(true); + Query query = queryBuilder.toQuery(createShardContext()); + + List disjuncts1 = new ArrayList<>(); + disjuncts1.add(new TermQuery(new Term(STRING_FIELD_NAME, "foo"))); + disjuncts1.add(new TermQuery(new Term(STRING_FIELD_NAME_2, "foo"))); + DisjunctionMaxQuery maxQuery1 = new DisjunctionMaxQuery(disjuncts1, 0.0f); + + List disjuncts2 = new ArrayList<>(); + disjuncts2.add(new TermQuery(new Term(STRING_FIELD_NAME, "bar"))); + disjuncts2.add(new TermQuery(new Term(STRING_FIELD_NAME_2, "bar"))); + DisjunctionMaxQuery maxQuery2 = new DisjunctionMaxQuery(disjuncts2, 0.0f); + + BooleanQuery expectedQuery = + new BooleanQuery.Builder() + .add(new BooleanClause(maxQuery1, BooleanClause.Occur.SHOULD)) + .add(new BooleanClause(maxQuery2, BooleanClause.Occur.SHOULD)) + .build(); + assertThat(query, equalTo(expectedQuery)); + } + + + } + public void testFromJson() throws IOException { String json = "{\n" + @@ -544,14 +666,13 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase weights, int flags, Settings settings) { super(analyzer, weights, flags, settings, null); @@ -106,4 +139,45 @@ public class SimpleQueryParserTests extends ESTestCase { } } + public void testQuoteFieldSuffix() { + SimpleQueryParser.Settings sqpSettings = new SimpleQueryParser.Settings(); + sqpSettings.quoteFieldSuffix(".quote"); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_INDEX_UUID, "some_uuid") + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + IndexMetaData indexState = IndexMetaData.builder("index").settings(indexSettings).build(); + IndexSettings settings = new IndexSettings(indexState, Settings.EMPTY); + QueryShardContext mockShardContext = new QueryShardContext(0, settings, null, null, null, null, null, indicesQueriesRegistry, + null, null, null, System::currentTimeMillis) { + @Override + public MappedFieldType fieldMapper(String name) { + return new MockFieldMapper.FakeFieldType(); + } 
+ }; + + SimpleQueryParser parser = new SimpleQueryParser(new StandardAnalyzer(), + Collections.singletonMap("foo", 1f), -1, sqpSettings, mockShardContext); + assertEquals(new TermQuery(new Term("foo", "bar")), parser.parse("bar")); + assertEquals(new TermQuery(new Term("foo.quote", "bar")), parser.parse("\"bar\"")); + + // Now check what happens if foo.quote does not exist + mockShardContext = new QueryShardContext(0, settings, null, null, null, null, null, indicesQueriesRegistry, + null, null, null, System::currentTimeMillis) { + @Override + public MappedFieldType fieldMapper(String name) { + if (name.equals("foo.quote")) { + return null; + } + return new MockFieldMapper.FakeFieldType(); + } + }; + parser = new SimpleQueryParser(new StandardAnalyzer(), + Collections.singletonMap("foo", 1f), -1, sqpSettings, mockShardContext); + assertEquals(new TermQuery(new Term("foo", "bar")), parser.parse("bar")); + assertEquals(new TermQuery(new Term("foo", "bar")), parser.parse("\"bar\"")); + } } diff --git a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index 07f3f389c27..a8a1de059e8 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -23,10 +23,13 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.util.TestUtil; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; @@ -56,12 +59,6 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase flagSet = new HashSet<>(); int size = randomIntBetween(0, SimpleQueryStringFlag.values().length); @@ -109,28 +109,11 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase 1) { assertThat(query, instanceOf(BooleanQuery.class)); BooleanQuery boolQuery = (BooleanQuery) query; - if (queryBuilder.lowercaseExpandedTerms()) { - for (BooleanClause clause : boolQuery.clauses()) { - if (clause.getQuery() instanceof TermQuery) { - TermQuery inner = (TermQuery) clause.getQuery(); - assertThat(inner.getTerm().bytes().toString(), is(inner.getTerm().bytes().toString().toLowerCase(Locale.ROOT))); - } + for (BooleanClause clause : boolQuery.clauses()) { + if (clause.getQuery() instanceof TermQuery) { + TermQuery inner = (TermQuery) clause.getQuery(); + assertThat(inner.getTerm().bytes().toString(), is(inner.getTerm().bytes().toString().toLowerCase(Locale.ROOT))); } } assertThat(boolQuery.clauses().size(), equalTo(queryBuilder.fields().size())); @@ -330,10 +311,9 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase { throw new UnsupportedOperationException(); }); } //see #11120 diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 9ccfd7243a5..cb9eb5a85a4 100644 --- 
a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -25,12 +25,10 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; -import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.action.support.replication.TransportWriteActionTestHelper; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -65,6 +63,8 @@ import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.StreamSupport; +import static org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnPrimary; +import static org.elasticsearch.action.index.TransportIndexAction.executeIndexRequestOnReplica; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -365,17 +365,19 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase @Override protected PrimaryResult performOnPrimary(IndexShard primary, IndexRequest request) throws Exception { - TransportWriteAction.WriteResult result = TransportIndexAction.executeIndexRequestOnPrimary(request, primary, - null); + final Engine.IndexResult indexResult = executeIndexRequestOnPrimary(request, primary, + null); request.primaryTerm(primary.getPrimaryTerm()); - TransportWriteActionTestHelper.performPostWriteActions(primary, request, result.getLocation(), logger); - return new PrimaryResult(request, result.getResponse()); + TransportWriteActionTestHelper.performPostWriteActions(primary, request, indexResult.getTranslogLocation(), logger); + IndexResponse response = new IndexResponse(primary.shardId(), request.type(), request.id(), indexResult.getVersion(), + indexResult.isCreated()); + return new PrimaryResult(request, response); } @Override protected void performOnReplica(IndexRequest request, IndexShard replica) { - Engine.Index index = TransportIndexAction.executeIndexRequestOnReplica(request, replica); - TransportWriteActionTestHelper.performPostWriteActions(replica, request, index.getTranslogLocation(), logger); + final Engine.IndexResult result = executeIndexRequestOnReplica(request, replica); + TransportWriteActionTestHelper.performPostWriteActions(replica, request, result.getTranslogLocation(), logger); } } } diff --git a/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java index 2454150be56..7d7b7a4cd6e 100644 --- a/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java @@ -78,7 +78,8 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { } public void testCrossFieldMultiMatchQuery() throws IOException { - QueryShardContext queryShardContext = indexService.newQueryShardContext(); + QueryShardContext queryShardContext = 
indexService.newQueryShardContext( + randomInt(20), null, () -> { throw new UnsupportedOperationException(); }); queryShardContext.setAllowUnmappedFields(true); Query parsedQuery = multiMatchQuery("banon").field("name.first", 2).field("name.last", 3).field("foobar").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).toQuery(queryShardContext); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { @@ -101,8 +102,9 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") }; float[] boosts = new float[] {2, 3}; Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false); - Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f, - new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm( + indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }), + new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } @@ -116,8 +118,9 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { Term[] terms = new Term[] { new Term("foo", "baz"), new Term("bar", "baz") }; float[] boosts = new float[] {200, 30}; Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false); - Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f, - new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm( + indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }), + new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } @@ -134,8 +137,9 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { Term[] terms = new Term[] { new Term("foo", "baz") }; float[] boosts = new float[] {2}; Query expected = BlendedTermQuery.booleanBlendedQuery(terms, boosts, false); - Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f, - new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm( + indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }), + new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } @@ -157,13 +161,15 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase { .add(expectedClause1, Occur.SHOULD) .add(expectedClause2, Occur.SHOULD) .build(); - Query actual = MultiMatchQuery.blendTerm(indexService.newQueryShardContext(), new BytesRef("baz"), null, 1f, - new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); + Query actual = MultiMatchQuery.blendTerm( + indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }), + new BytesRef("baz"), null, 1f, new FieldAndFieldType(ft1, 2), new FieldAndFieldType(ft2, 3)); assertEquals(expected, actual); } public void testMultiMatchPrefixWithAllField() throws IOException { - QueryShardContext queryShardContext = indexService.newQueryShardContext(); + QueryShardContext queryShardContext = indexService.newQueryShardContext( + randomInt(20), null, () -> { throw new UnsupportedOperationException(); }); 
queryShardContext.setAllowUnmappedFields(true); Query parsedQuery = multiMatchQuery("foo").field("_all").type(MultiMatchQueryBuilder.Type.PHRASE_PREFIX).toQuery(queryShardContext); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 2248ff156ac..9cf6594e42c 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -408,7 +408,7 @@ public class IndexShardIT extends ESSingleNodeTestCase { IndexingOperationListener listener = new IndexingOperationListener() { @Override - public void postIndex(Engine.Index index, boolean created) { + public void postIndex(Engine.Index index, Engine.IndexResult result) { try { assertNotNull(shardRef.get()); // this is all IMC needs to do - check current memory and refresh @@ -422,7 +422,7 @@ public class IndexShardIT extends ESSingleNodeTestCase { @Override - public void postDelete(Engine.Delete delete) { + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { try { assertNotNull(shardRef.get()); // this is all IMC needs to do - check current memory and refresh diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index bd918457e21..c3c4454f8a0 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -115,6 +115,7 @@ import java.util.function.BiConsumer; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.common.lucene.Lucene.cleanLuceneIndex; +import static org.elasticsearch.common.lucene.Lucene.readScoreDoc; import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; @@ -564,11 +565,15 @@ public class IndexShardTests extends IndexShardTestCase { } @Override - public void postIndex(Engine.Index index, boolean created) { - if (created) { - postIndexCreate.incrementAndGet(); + public void postIndex(Engine.Index index, Engine.IndexResult result) { + if (result.hasFailure() == false) { + if (result.isCreated()) { + postIndexCreate.incrementAndGet(); + } else { + postIndexUpdate.incrementAndGet(); + } } else { - postIndexUpdate.incrementAndGet(); + postIndex(index, result.getFailure()); } } @@ -584,8 +589,12 @@ public class IndexShardTests extends IndexShardTestCase { } @Override - public void postDelete(Engine.Delete delete) { - postDelete.incrementAndGet(); + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { + if (result.hasFailure() == false) { + postDelete.incrementAndGet(); + } else { + postDelete(delete, result.getFailure()); + } } @Override @@ -1127,7 +1136,7 @@ public class IndexShardTests extends IndexShardTestCase { } @Override - public void postIndex(Engine.Index index, boolean created) { + public void postIndex(Engine.Index index, Engine.IndexResult result) { postIndex.incrementAndGet(); } @@ -1138,7 +1147,7 @@ public class IndexShardTests extends IndexShardTestCase { } @Override - public void postDelete(Engine.Delete delete) { + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { postDelete.incrementAndGet(); } diff --git 
a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java index d1cf8b32f58..b810175fcd8 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -46,8 +46,12 @@ public class IndexingOperationListenerTests extends ESTestCase{ } @Override - public void postIndex(Engine.Index index, boolean created) { - postIndex.incrementAndGet(); + public void postIndex(Engine.Index index, Engine.IndexResult result) { + if (result.hasFailure() == false) { + postIndex.incrementAndGet(); + } else { + postIndex(index, result.getFailure()); + } } @Override @@ -62,8 +66,12 @@ public class IndexingOperationListenerTests extends ESTestCase{ } @Override - public void postDelete(Engine.Delete delete) { - postDelete.incrementAndGet(); + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { + if (result.hasFailure() == false) { + postDelete.incrementAndGet(); + } else { + postDelete(delete, result.getFailure()); + } } @Override @@ -79,12 +87,14 @@ public class IndexingOperationListenerTests extends ESTestCase{ } @Override - public void postIndex(Engine.Index index, boolean created) { - throw new RuntimeException(); } + public void postIndex(Engine.Index index, Engine.IndexResult result) { + throw new RuntimeException(); + } @Override public void postIndex(Engine.Index index, Exception ex) { - throw new RuntimeException(); } + throw new RuntimeException(); + } @Override public Engine.Delete preDelete(Engine.Delete delete) { @@ -92,8 +102,9 @@ public class IndexingOperationListenerTests extends ESTestCase{ } @Override - public void postDelete(Engine.Delete delete) { - throw new RuntimeException(); } + public void postDelete(Engine.Delete delete, Engine.DeleteResult result) { + throw new RuntimeException(); + } @Override public void postDelete(Engine.Delete delete, Exception ex) { @@ -111,7 +122,7 @@ public class IndexingOperationListenerTests extends ESTestCase{ IndexingOperationListener.CompositeListener compositeListener = new IndexingOperationListener.CompositeListener(indexingOperationListeners, logger); Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", "1")); Engine.Index index = new Engine.Index(new Term("_uid", "1"), null); - compositeListener.postDelete(delete); + compositeListener.postDelete(delete, new Engine.DeleteResult(1, true)); assertEquals(0, preIndex.get()); assertEquals(0, postIndex.get()); assertEquals(0, postIndexException.get()); @@ -135,7 +146,7 @@ public class IndexingOperationListenerTests extends ESTestCase{ assertEquals(2, postDelete.get()); assertEquals(2, postDeleteException.get()); - compositeListener.postIndex(index, false); + compositeListener.postIndex(index, new Engine.IndexResult(0, false)); assertEquals(0, preIndex.get()); assertEquals(2, postIndex.get()); assertEquals(0, postIndexException.get()); diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 05147d4a72a..94c0407f700 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -137,7 +137,7 @@ public class RefreshListenersTests extends ESTestCase { public void testTooMany() throws Exception { 
assertFalse(listeners.refreshNeeded()); - Engine.Index index = index("1"); + Engine.IndexResult index = index("1"); // Fill the listener slots List nonForcedListeners = new ArrayList<>(maxListeners); @@ -168,7 +168,7 @@ public class RefreshListenersTests extends ESTestCase { } public void testAfterRefresh() throws Exception { - Engine.Index index = index("1"); + Engine.IndexResult index = index("1"); engine.refresh("I said so"); if (randomBoolean()) { index(randomFrom("1" /* same document */, "2" /* different document */)); @@ -198,7 +198,7 @@ public class RefreshListenersTests extends ESTestCase { refresher.start(); try { for (int i = 0; i < 1000; i++) { - Engine.Index index = index("1"); + Engine.IndexResult index = index("1"); DummyRefreshListener listener = new DummyRefreshListener(); boolean immediate = listeners.addOrNotify(index.getTranslogLocation(), listener); if (immediate) { @@ -234,8 +234,8 @@ public class RefreshListenersTests extends ESTestCase { for (int iteration = 1; iteration <= 50; iteration++) { try { String testFieldValue = String.format(Locale.ROOT, "%s%04d", threadId, iteration); - Engine.Index index = index(threadId, testFieldValue); - assertEquals(iteration, index.version()); + Engine.IndexResult index = index(threadId, testFieldValue); + assertEquals(iteration, index.getVersion()); DummyRefreshListener listener = new DummyRefreshListener(); listeners.addOrNotify(index.getTranslogLocation(), listener); @@ -245,7 +245,7 @@ public class RefreshListenersTests extends ESTestCase { } listener.assertNoError(); - Engine.Get get = new Engine.Get(false, index.uid()); + Engine.Get get = new Engine.Get(false, new Term("_uid", "test:"+threadId)); try (Engine.GetResult getResult = engine.get(get)) { assertTrue("document not found", getResult.exists()); assertEquals(iteration, getResult.version()); @@ -267,11 +267,11 @@ public class RefreshListenersTests extends ESTestCase { refresher.cancel(); } - private Engine.Index index(String id) { + private Engine.IndexResult index(String id) { return index(id, "test"); } - private Engine.Index index(String id, String testFieldValue) { + private Engine.IndexResult index(String id, String testFieldValue) { String type = "test"; String uid = type + ":" + id; Document document = new Document(); @@ -283,8 +283,7 @@ public class RefreshListenersTests extends ESTestCase { BytesReference source = new BytesArray(new byte[] { 1 }); ParsedDocument doc = new ParsedDocument(versionField, id, type, null, -1, -1, Arrays.asList(document), source, null); Engine.Index index = new Engine.Index(new Term("_uid", uid), doc); - engine.index(index); - return index; + return engine.index(index); } private static class DummyRefreshListener implements Consumer { diff --git a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java index 4bd8ba9cb3e..74b61047ace 100644 --- a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java +++ b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.index.snapshots.blobstore; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.elasticsearch.test.ESTestCase; import java.io.ByteArrayInputStream; @@ -111,7 +111,7 @@ public class SlicedInputStreamTests extends ESTestCase { } private byte[] 
randomBytes(Random random) { - int length = RandomInts.randomIntBetween(random, 1, 10); + int length = RandomNumbers.randomIntBetween(random, 1, 10); byte[] data = new byte[length]; random.nextBytes(data); return data; diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index a996c9f4bd8..d1be0d77613 100644 --- a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -60,8 +60,8 @@ import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; +import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -178,7 +178,8 @@ public class CorruptedFileIT extends ESIntegTestCase { .timeout("5m") // sometimes due to cluster rebalacing and random settings default timeout is just not enough. .waitForNoRelocatingShards(true)).actionGet(); if (health.isTimedOut()) { - logger.info("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.info("cluster state:\n{}\n{}", + client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get()); assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false)); } assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -284,7 +285,8 @@ public class CorruptedFileIT extends ESIntegTestCase { .health(Requests.clusterHealthRequest("test")).get(); if (response.getStatus() != ClusterHealthStatus.RED) { logger.info("Cluster turned red in busy loop: {}", didClusterTurnRed); - logger.info("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.info("cluster state:\n{}\n{}", + client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get()); } assertThat(response.getStatus(), is(ClusterHealthStatus.RED)); ClusterState state = client().admin().cluster().prepareState().get().getState(); @@ -445,7 +447,8 @@ public class CorruptedFileIT extends ESIntegTestCase { ClusterHealthResponse actionGet = client().admin().cluster() .health(Requests.clusterHealthRequest("test").waitForGreenStatus()).actionGet(); if (actionGet.isTimedOut()) { - logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.info("ensureGreen timed out, cluster state:\n{}\n{}", + client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get()); assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false)); } // we are green so primaries got not corrupted. 
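// ---------------------------------------------------------------------------
// Aside: a minimal sketch of the result-object pattern the listener and
// refresh-listener changes above move to. Index and IndexResult below are
// simplified stand-ins for Engine.Index / Engine.IndexResult, not the real
// Elasticsearch classes; the point is the dispatch: a failed result is routed
// to the exception callback instead of being counted as a successful index.
// ---------------------------------------------------------------------------
import java.util.concurrent.atomic.AtomicLong;

final class ResultDispatchSketch {
    static final class Index {}                      // stand-in for Engine.Index
    static final class IndexResult {                 // stand-in for Engine.IndexResult
        private final long version;
        private final Exception failure;             // null on success
        IndexResult(long version, Exception failure) {
            this.version = version;
            this.failure = failure;
        }
        long getVersion() { return version; }        // mirrors result.getVersion() used above
        boolean hasFailure() { return failure != null; }
        Exception getFailure() { return failure; }
    }

    final AtomicLong postIndex = new AtomicLong();
    final AtomicLong postIndexException = new AtomicLong();

    // mirrors the updated test listener: one entry point, two outcomes
    void postIndex(Index index, IndexResult result) {
        if (result.hasFailure() == false) {
            postIndex.incrementAndGet();
        } else {
            postIndex(index, result.getFailure());
        }
    }

    void postIndex(Index index, Exception ex) {
        postIndexException.incrementAndGet();
    }

    public static void main(String[] args) {
        ResultDispatchSketch sketch = new ResultDispatchSketch();
        sketch.postIndex(new Index(), new IndexResult(1, null));
        sketch.postIndex(new Index(), new IndexResult(0, new RuntimeException("boom")));
        System.out.println(sketch.postIndex.get() + " success, " + sketch.postIndexException.get() + " failed");
    }
}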
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 0803a788e8a..1e97d4dd57b 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -362,7 +362,7 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { public void testCanCache() throws Exception { assertAcked(client().admin().indices().prepareCreate("index").addMapping("type", "s", "type=date") .setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true, IndexMetaData.SETTING_NUMBER_OF_SHARDS, - 5, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + 2, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .get()); indexRandom(true, client().prepareIndex("index", "type", "1").setRouting("1").setSource("s", "2016-03-19"), client().prepareIndex("index", "type", "2").setRouting("1").setSource("s", "2016-03-20"), @@ -411,7 +411,7 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), equalTo(0L)); - // If the request has an aggregation containng now we should not cache + // If the request has an aggregation containing now we should not cache final SearchResponse r4 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) .addAggregation(filter("foo", QueryBuilders.rangeQuery("s").from("now-10y").to("now"))).get(); @@ -441,7 +441,7 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), equalTo(0L)); assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), - equalTo(5L)); + equalTo(2L)); } public void testCacheWithFilteredAlias() { diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java index 8bb8a4ddf8a..5d5584a156f 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java @@ -31,7 +31,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -43,6 +43,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.Arrays; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; public class IndicesRequestCacheTests extends ESTestCase { @@ -59,23 +60,25 @@ public class IndicesRequestCacheTests extends ESTestCase { AtomicBoolean indexShard = new AtomicBoolean(true); // initial cache - TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); - BytesReference value = cache.getOrCompute(entity, reader, 
termQuery.buildAsBytes()); + TestEntity entity = new TestEntity(requestCacheStats, indexShard); + Loader loader = new Loader(reader, 0); + BytesReference value = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value.streamInput().readString()); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertFalse(entity.loadedFromCache()); + assertFalse(loader.loadedFromCache); assertEquals(1, cache.count()); // cache hit - entity = new TestEntity(requestCacheStats, reader, indexShard, 0); - value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + entity = new TestEntity(requestCacheStats, indexShard); + loader = new Loader(reader, 0); + value = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value.streamInput().readString()); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(entity.loadedFromCache()); + assertTrue(loader.loadedFromCache); assertEquals(1, cache.count()); assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > value.length()); assertEquals(1, cache.numRegisteredCloseListeners()); @@ -91,7 +94,7 @@ public class IndicesRequestCacheTests extends ESTestCase { assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(entity.loadedFromCache()); + assertTrue(loader.loadedFromCache); assertEquals(0, cache.count()); assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); @@ -114,46 +117,50 @@ public class IndicesRequestCacheTests extends ESTestCase { DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); // initial cache - TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); - BytesReference value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + TestEntity entity = new TestEntity(requestCacheStats, indexShard); + Loader loader = new Loader(reader, 0); + BytesReference value = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value.streamInput().readString()); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertFalse(entity.loadedFromCache()); + assertFalse(loader.loadedFromCache); assertEquals(1, cache.count()); assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > value.length()); final int cacheSize = requestCacheStats.stats().getMemorySize().bytesAsInt(); assertEquals(1, cache.numRegisteredCloseListeners()); // cache the second - TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); - value = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + loader = new Loader(secondReader, 0); + value = cache.getOrCompute(secondEntity, loader, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value.streamInput().readString()); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount());
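// ---------------------------------------------------------------------------
// Aside: the refactor above moves value computation out of the cache entity and
// into a Supplier<BytesReference> handed to getOrCompute(...). A hit can then be
// detected by whether the supplier ever ran. Below is a minimal sketch of that
// contract with generic stand-in types, not the IndicesRequestCache API.
// ---------------------------------------------------------------------------
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

final class LoaderCacheSketch {
    static final class Cache<K, V> {
        private final Map<K, V> map = new ConcurrentHashMap<>();
        // runs the loader only on a miss, like cache.getOrCompute(entity, loader, ...)
        V getOrCompute(K key, Supplier<V> loader) {
            return map.computeIfAbsent(key, k -> loader.get());
        }
    }

    static final class TrackingLoader implements Supplier<String> {
        boolean loadedFromCache = true;   // stays true when the cache answered
        @Override
        public String get() {
            loadedFromCache = false;      // flipped only when we really compute
            return "computed";
        }
    }

    public static void main(String[] args) {
        Cache<String, String> cache = new Cache<>();
        TrackingLoader first = new TrackingLoader();
        cache.getOrCompute("key", first);       // miss: supplier runs
        TrackingLoader second = new TrackingLoader();
        cache.getOrCompute("key", second);      // hit: supplier skipped
        System.out.println(first.loadedFromCache + " " + second.loadedFromCache); // false true
    }
}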
assertEquals(0, requestCacheStats.stats().getEvictions()); - assertFalse(secondEntity.loadedFromCache()); + assertFalse(loader.loadedFromCache); assertEquals(2, cache.count()); assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > cacheSize + value.length()); assertEquals(2, cache.numRegisteredCloseListeners()); - secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); - value = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + secondEntity = new TestEntity(requestCacheStats, indexShard); + loader = new Loader(secondReader, 0); + value = cache.getOrCompute(secondEntity, loader, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value.streamInput().readString()); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(secondEntity.loadedFromCache()); + assertTrue(loader.loadedFromCache); assertEquals(2, cache.count()); - entity = new TestEntity(requestCacheStats, reader, indexShard, 0); - value = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + entity = new TestEntity(requestCacheStats, indexShard); + loader = new Loader(reader, 0); + value = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value.streamInput().readString()); assertEquals(2, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(entity.loadedFromCache()); + assertTrue(loader.loadedFromCache); assertEquals(2, cache.count()); // Closing the cache doesn't change returned entities @@ -161,8 +168,8 @@ public class IndicesRequestCacheTests extends ESTestCase { cache.cleanCache(); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(entity.loadedFromCache()); - assertTrue(secondEntity.loadedFromCache()); + assertTrue(loader.loadedFromCache); + assertTrue(loader.loadedFromCache); assertEquals(1, cache.count()); assertEquals(cacheSize, requestCacheStats.stats().getMemorySize().bytesAsInt()); assertEquals(1, cache.numRegisteredCloseListeners()); @@ -178,8 +185,8 @@ public class IndicesRequestCacheTests extends ESTestCase { cache.cleanCache(); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); - assertTrue(entity.loadedFromCache()); - assertTrue(secondEntity.loadedFromCache()); + assertTrue(loader.loadedFromCache); + assertTrue(loader.loadedFromCache); assertEquals(0, cache.count()); assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); @@ -200,16 +207,18 @@ public class IndicesRequestCacheTests extends ESTestCase { DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); + TestEntity entity = new TestEntity(requestCacheStats, indexShard); + Loader loader = new Loader(reader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); - - BytesReference value1 = 
cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + Loader secondLoader = new Loader(secondReader, 0); + + BytesReference value1 = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value1.streamInput().readString()); - BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value2.streamInput().readString()); size = requestCacheStats.stats().getMemorySize(); IOUtils.close(reader, secondReader, writer, dir, cache); @@ -226,24 +235,27 @@ public class IndicesRequestCacheTests extends ESTestCase { DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); + TestEntity entity = new TestEntity(requestCacheStats, indexShard); + Loader loader = new Loader(reader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); + TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + Loader secondLoader = new Loader(secondReader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity thirddEntity = new TestEntity(requestCacheStats, thirdReader, indexShard, 0); + TestEntity thirddEntity = new TestEntity(requestCacheStats, indexShard); + Loader thirdLoader = new Loader(thirdReader, 0); - BytesReference value1 = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + BytesReference value1 = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value1.streamInput().readString()); - BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value2.streamInput().readString()); logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize()); - BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); + BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termQuery.buildAsBytes()); assertEquals("baz", value3.streamInput().readString()); assertEquals(2, cache.count()); assertEquals(1, requestCacheStats.stats().getEvictions()); @@ -262,25 +274,28 @@ public class IndicesRequestCacheTests extends ESTestCase { DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - TestEntity entity = new TestEntity(requestCacheStats, reader, indexShard, 0); + TestEntity entity = new TestEntity(requestCacheStats, indexShard); + Loader loader = new Loader(reader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new 
ShardId("foo", "bar", 1)); - TestEntity secondEntity = new TestEntity(requestCacheStats, secondReader, indexShard, 0); + TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + Loader secondLoader = new Loader(secondReader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); AtomicBoolean differentIdentity = new AtomicBoolean(true); - TestEntity thirddEntity = new TestEntity(requestCacheStats, thirdReader, differentIdentity, 0); + TestEntity thirddEntity = new TestEntity(requestCacheStats, differentIdentity); + Loader thirdLoader = new Loader(thirdReader, 0); - BytesReference value1 = cache.getOrCompute(entity, reader, termQuery.buildAsBytes()); + BytesReference value1 = cache.getOrCompute(entity, loader, reader, termQuery.buildAsBytes()); assertEquals("foo", value1.streamInput().readString()); - BytesReference value2 = cache.getOrCompute(secondEntity, secondReader, termQuery.buildAsBytes()); + BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termQuery.buildAsBytes()); assertEquals("bar", value2.streamInput().readString()); logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize()); - BytesReference value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); + BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termQuery.buildAsBytes()); assertEquals("baz", value3.streamInput().readString()); assertEquals(3, cache.count()); final long hitCount = requestCacheStats.stats().getHitCount(); @@ -289,7 +304,7 @@ public class IndicesRequestCacheTests extends ESTestCase { cache.cleanCache(); assertEquals(1, cache.count()); // third has not been validated since it's a different identity - value3 = cache.getOrCompute(thirddEntity, thirdReader, termQuery.buildAsBytes()); + value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termQuery.buildAsBytes()); assertEquals(hitCount + 1, requestCacheStats.stats().getHitCount()); assertEquals("baz", value3.streamInput().readString()); @@ -303,20 +318,39 @@ public class IndicesRequestCacheTests extends ESTestCase { StringField.TYPE_STORED)); } + private static class Loader implements Supplier<BytesReference> { + + private final DirectoryReader reader; + private final int id; + public boolean loadedFromCache = true; + + public Loader(DirectoryReader reader, int id) { + super(); + this.reader = reader; + this.id = id; + } + + @Override + public BytesReference get() { + try (BytesStreamOutput out = new BytesStreamOutput()) { + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs topDocs = searcher.search(new
TermQuery(new Term("id", Integer.toString(id))), 1); - assertEquals(1, topDocs.totalHits); - Document document = reader.document(topDocs.scoreDocs[0].doc); - out.writeString(document.get("value")); - } - }); + private TestEntity(ShardRequestCache shardRequestCache, AtomicBoolean standInForIndexShard) { this.standInForIndexShard = standInForIndexShard; this.shardRequestCache = shardRequestCache; } @@ -335,5 +369,10 @@ public class IndicesRequestCacheTests extends ESTestCase { public Object getCacheIdentity() { return standInForIndexShard; } + + @Override + public long ramBytesUsed() { + return 42; + } } } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 67a82d93c54..074b4a5d6bb 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -91,7 +91,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice } catch (AssertionError error) { ClusterState finalState = state; logger.error((org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("failed to random change state. last good state: \n{}", finalState.prettyPrint()), error); + new ParameterizedMessage("failed to random change state. last good state: \n{}", finalState), error); throw error; } } @@ -107,7 +107,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice } catch (AssertionError error) { logger.error((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( "failed to apply change on [{}].\n *** Previous state ***\n{}\n *** New state ***\n{}", - node, event.previousState().prettyPrint(), event.state().prettyPrint()), error); + node, event.previousState(), event.state()), error); throw error; } @@ -117,7 +117,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice } // TODO: check if we can go to green by starting all shards and finishing all iterations - logger.info("Final cluster state: {}", state.prettyPrint()); + logger.info("Final cluster state: {}", state); } /** diff --git a/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java b/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java index 8ec629dbbdc..54bdfd05008 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java @@ -18,99 +18,48 @@ */ package org.elasticsearch.indices.state; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; -import org.elasticsearch.action.support.DestructiveOperations; -import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.junit.After; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; -@ClusterScope(scope=Scope.TEST, numDataNodes=2) public class CloseIndexDisableCloseAllIT extends ESIntegTestCase { - // Combined multiple tests into one, because cluster scope is test. - // The cluster scope is test b/c we can't clear cluster settings. - public void testCloseAllRequiresName() { - Settings clusterSettings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) + + @After + public void afterTest() { + Settings settings = Settings.builder().put(TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey(), (String)null) .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(clusterSettings)); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + + public void testCloseAllRequiresName() { createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); - assertThat(healthResponse.isTimedOut(), equalTo(false)); - // Close all explicitly - try { - client().admin().indices().prepareClose("_all").execute().actionGet(); - fail(); - } catch (IllegalArgumentException e) { - } - - // Close all wildcard - try { - client().admin().indices().prepareClose("*").execute().actionGet(); - fail(); - } catch (IllegalArgumentException e) { - } - - // Close all wildcard - try { - client().admin().indices().prepareClose("test*").execute().actionGet(); - fail(); - } catch (IllegalArgumentException e) { - } - - // Close all wildcard - try { - client().admin().indices().prepareClose("*", "-test1").execute().actionGet(); - fail(); - } catch (IllegalArgumentException e) { - } - - // Close all wildcard - try { - client().admin().indices().prepareClose("*", "-test1", "+test1").execute().actionGet(); - fail(); - } catch (IllegalArgumentException e) { - } - - CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test3", "test2").execute().actionGet(); - assertThat(closeIndexResponse.isAcknowledged(), equalTo(true)); + assertAcked(client().admin().indices().prepareClose("test3", "test2")); assertIndexIsClosed("test2", "test3"); // disable closing - Client client = client(); createIndex("test_no_close"); - healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); - assertThat(healthResponse.isTimedOut(), equalTo(false)); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey(), false)).get(); + Settings settings = Settings.builder().put(TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey(), false).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - try { - client.admin().indices().prepareClose("test_no_close").execute().actionGet(); - fail("exception expected"); - } catch (IllegalStateException ex) { - assertEquals(ex.getMessage(), "closing indices is disabled - set [cluster.indices.close.enable: true] to enable it. 
NOTE: closed indices still consume a significant amount of diskspace"); - } + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> client().admin().indices().prepareClose("test_no_close").get()); + assertEquals(illegalStateException.getMessage(), + "closing indices is disabled - set [cluster.indices.close.enable: true] to enable it. NOTE: closed indices still " + + "consume a significant amount of diskspace"); } private void assertIndexIsClosed(String... indices) { - checkIndexState(IndexMetaData.State.CLOSE, indices); - } - - private void checkIndexState(IndexMetaData.State state, String... indices) { ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet(); for (String index : indices) { IndexMetaData indexMetaData = clusterStateResponse.getState().metaData().indices().get(index); - assertThat(indexMetaData, notNullValue()); - assertThat(indexMetaData.getState(), equalTo(state)); + assertNotNull(indexMetaData); + assertEquals(IndexMetaData.State.CLOSE, indexMetaData.getState()); } } } diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index b95d872a61e..a3f22817dad 100644 --- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -489,11 +489,11 @@ public class IndexStatsIT extends ESIntegTestCase { } catch (VersionConflictEngineException e) {} stats = client().admin().indices().prepareStats().setTypes("type1", "type2").execute().actionGet(); - assertThat(stats.getIndex("test1").getTotal().getIndexing().getTotal().getIndexFailedCount(), equalTo(2L)); - assertThat(stats.getIndex("test2").getTotal().getIndexing().getTotal().getIndexFailedCount(), equalTo(1L)); + assertThat(stats.getIndex("test1").getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(2L)); + assertThat(stats.getIndex("test2").getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(1L)); assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getIndexFailedCount(), equalTo(1L)); assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type2").getIndexFailedCount(), equalTo(1L)); - assertThat(stats.getTotal().getIndexing().getTotal().getIndexFailedCount(), equalTo(3L)); + assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(3L)); } public void testMergeStats() { diff --git a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java new file mode 100644 index 00000000000..fea2e4699d5 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java @@ -0,0 +1,147 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
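// ---------------------------------------------------------------------------
// Aside: the rewritten tests above and below replace try { ...; fail(); }
// catch (...) {} blocks with expectThrows(...). The real helper comes from the
// Lucene/Elasticsearch test framework; this is a minimal illustrative sketch
// of its contract, with generic names.
// ---------------------------------------------------------------------------
final class ExpectThrowsSketch {
    interface ThrowingRunnable {
        void run() throws Throwable;
    }

    static <T extends Throwable> T expectThrows(Class<T> expectedType, ThrowingRunnable runnable) {
        try {
            runnable.run();
        } catch (Throwable t) {
            if (expectedType.isInstance(t)) {
                return expectedType.cast(t);   // returned so callers can assert on the message
            }
            throw new AssertionError("unexpected exception type: " + t.getClass(), t);
        }
        throw new AssertionError("expected " + expectedType.getSimpleName() + " but nothing was thrown");
    }

    public static void main(String[] args) {
        IllegalArgumentException e =
            expectThrows(IllegalArgumentException.class, () -> { throw new IllegalArgumentException("boom"); });
        System.out.println(e.getMessage());   // boom
    }
}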
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.operateAllIndices; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESIntegTestCase; +import org.junit.After; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +public class DestructiveOperationsIT extends ESIntegTestCase { + + @After + public void afterTest() { + Settings settings = Settings.builder().put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), (String)null).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + + public void testDeleteIndexIsRejected() throws Exception { + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + + createIndex("index1", "1index"); + + // Should succeed, since no wildcards + assertAcked(client().admin().indices().prepareDelete("1index").get()); + + expectThrows(IllegalArgumentException.class, () -> client().admin().indices().prepareDelete("i*").get()); + expectThrows(IllegalArgumentException.class, () -> client().admin().indices().prepareDelete("_all").get()); + } + + public void testDeleteIndexDefaultBehaviour() throws Exception { + if (randomBoolean()) { + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + + createIndex("index1", "1index"); + + if (randomBoolean()) { + assertAcked(client().admin().indices().prepareDelete("_all").get()); + } else { + assertAcked(client().admin().indices().prepareDelete("*").get()); + } + + assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false)); + } + + public void testCloseIndexIsRejected() throws Exception { + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + + createIndex("index1", "1index"); + + // Should succeed, since no wildcards + assertAcked(client().admin().indices().prepareClose("1index").get()); + + expectThrows(IllegalArgumentException.class, () -> client().admin().indices().prepareClose("i*").get()); + expectThrows(IllegalArgumentException.class, () -> client().admin().indices().prepareClose("_all").get()); + } + + public void testCloseIndexDefaultBehaviour() throws Exception { + if (randomBoolean()) { + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) + .build(); + 
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + + createIndex("index1", "1index"); + + if (randomBoolean()) { + assertAcked(client().admin().indices().prepareClose("_all").get()); + } else { + assertAcked(client().admin().indices().prepareClose("*").get()); + } + + ClusterState state = client().admin().cluster().prepareState().get().getState(); + for (ObjectObjectCursor<String, IndexMetaData> indexMetaDataObjectObjectCursor : state.getMetaData().indices()) { + assertEquals(IndexMetaData.State.CLOSE, indexMetaDataObjectObjectCursor.value.getState()); + } + } + + public void testOpenIndexIsRejected() throws Exception { + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + + createIndex("index1", "1index"); + assertAcked(client().admin().indices().prepareClose("1index", "index1").get()); + + expectThrows(IllegalArgumentException.class, () -> client().admin().indices().prepareOpen("i*").get()); + expectThrows(IllegalArgumentException.class, () -> client().admin().indices().prepareOpen("_all").get()); + } + + public void testOpenIndexDefaultBehaviour() throws Exception { + if (randomBoolean()) { + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + + createIndex("index1", "1index"); + assertAcked(client().admin().indices().prepareClose("1index", "index1").get()); + + if (randomBoolean()) { + assertAcked(client().admin().indices().prepareOpen("_all").get()); + } else { + assertAcked(client().admin().indices().prepareOpen("*").get()); + } + + ClusterState state = client().admin().cluster().prepareState().get().getState(); + for (ObjectObjectCursor<String, IndexMetaData> indexMetaDataObjectObjectCursor : state.getMetaData().indices()) { + assertEquals(IndexMetaData.State.OPEN, indexMetaDataObjectObjectCursor.value.getState()); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java deleted file mode 100644 index 28852d74696..00000000000 --- a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -package org.elasticsearch.operateAllIndices; - -import org.elasticsearch.action.support.DestructiveOperations; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESIntegTestCase; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) -public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { - // One test for test performance, since cluster scope is test - // The cluster scope is test b/c we can't clear cluster settings. - public void testDestructiveOperations() throws Exception { - Settings settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) - .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - - assertAcked(client().admin().indices().prepareCreate("index1").get()); - assertAcked(client().admin().indices().prepareCreate("1index").get()); - - // Should succeed, since no wildcards - assertAcked(client().admin().indices().prepareDelete("1index").get()); - - try { - // should fail since index1 is the only index. - client().admin().indices().prepareDelete("i*").get(); - fail(); - } catch (IllegalArgumentException e) { - } - - try { - client().admin().indices().prepareDelete("_all").get(); - fail(); - } catch (IllegalArgumentException e) { - } - - settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) - .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - - assertAcked(client().admin().indices().prepareDelete("_all").get()); - assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false)); - - // end delete index: - // close index: - settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) - .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - - assertAcked(client().admin().indices().prepareCreate("index1").get()); - assertAcked(client().admin().indices().prepareCreate("1index").get()); - // Should succeed, since no wildcards - assertAcked(client().admin().indices().prepareClose("1index").get()); - - try { - client().admin().indices().prepareClose("_all").get(); - fail(); - } catch (IllegalArgumentException e) { - } - try { - assertAcked(client().admin().indices().prepareOpen("_all").get()); - fail(); - } catch (IllegalArgumentException e) { - } - try { - client().admin().indices().prepareClose("*").get(); - fail(); - } catch (IllegalArgumentException e) { - } - try { - assertAcked(client().admin().indices().prepareOpen("*").get()); - fail(); - } catch (IllegalArgumentException e) { - } - - settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) - .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - assertAcked(client().admin().indices().prepareClose("_all").get()); - assertAcked(client().admin().indices().prepareOpen("_all").get()); - - // end close index: - client().admin().indices().prepareDelete("_all").get(); - } -} diff --git a/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java b/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java index a180d6feb8e..4a61bebd4db 100644 --- 
a/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java @@ -149,7 +149,8 @@ public class FullRollingRestartIT extends ESIntegTestCase { ClusterState state = client().admin().cluster().prepareState().get().getState(); RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get(); for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { - assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode() + "\n" + state.prettyPrint(), recoveryState.getRecoverySource().getType() != RecoverySource.Type.PEER || recoveryState.getPrimary() == false); + assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode() + "\n" + state, + recoveryState.getRecoverySource().getType() != RecoverySource.Type.PEER || recoveryState.getPrimary() == false); } internalCluster().restartRandomDataNode(); ensureGreen(); @@ -157,7 +158,8 @@ public class FullRollingRestartIT extends ESIntegTestCase { recoveryResponse = client().admin().indices().prepareRecoveries("test").get(); for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { - assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state.prettyPrint() + "\nafter: \n" + afterState.prettyPrint(), recoveryState.getRecoverySource().getType() != RecoverySource.Type.PEER || recoveryState.getPrimary() == false); + assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state, + recoveryState.getRecoverySource().getType() != RecoverySource.Type.PEER || recoveryState.getPrimary() == false); } } } diff --git a/core/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java b/core/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java new file mode 100644 index 00000000000..2bc0d0bdc81 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.rest.action; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestChannel; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponse.Empty; + +import java.util.concurrent.atomic.AtomicReference; + +public class RestBuilderListenerTests extends ESTestCase { + + public void testXContentBuilderClosedInBuildResponse() throws Exception { + AtomicReference<XContentBuilder> builderAtomicReference = new AtomicReference<>(); + RestBuilderListener<Empty> builderListener = + new RestBuilderListener<Empty>(new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1)) { + @Override + public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { + builderAtomicReference.set(builder); + builder.close(); + return new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY); + } + }; + + builderListener.buildResponse(Empty.INSTANCE); + assertNotNull(builderAtomicReference.get()); + assertTrue(builderAtomicReference.get().generator().isClosed()); + } + + public void testXContentBuilderNotClosedInBuildResponseAssertionsDisabled() throws Exception { + AtomicReference<XContentBuilder> builderAtomicReference = new AtomicReference<>(); + RestBuilderListener<Empty> builderListener = + new RestBuilderListener<Empty>(new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1)) { + @Override + public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { + builderAtomicReference.set(builder); + return new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY); + } + + @Override + boolean assertBuilderClosed(XContentBuilder xContentBuilder) { + // don't check the actual builder being closed so we can test auto close + return true; + } + }; + + builderListener.buildResponse(Empty.INSTANCE); + assertNotNull(builderAtomicReference.get()); + assertTrue(builderAtomicReference.get().generator().isClosed()); + } + + public void testXContentBuilderNotClosedInBuildResponseAssertionsEnabled() throws Exception { + assumeTrue("tests are not being run with assertions", RestBuilderListener.class.desiredAssertionStatus()); + + RestBuilderListener<Empty> builderListener = + new RestBuilderListener<Empty>(new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1)) { + @Override + public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY); + } + }; + + AssertionError error = expectThrows(AssertionError.class, () -> builderListener.buildResponse(Empty.INSTANCE)); + assertEquals("callers should ensure the XContentBuilder is closed themselves", error.getMessage()); + } +} diff --git a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java index eeafbde8850..efff2b0834e 100644 --- a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java @@ -62,7 +62,7 @@ public class NativeScriptTests extends ESTestCase { Settings.Builder builder = Settings.builder(); if (randomBoolean()) {
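// ---------------------------------------------------------------------------
// Aside: RestBuilderListenerTests above guards one test with
// assumeTrue(..., RestBuilderListener.class.desiredAssertionStatus()) so it
// only runs when -ea assertions are enabled. A small self-contained
// illustration of that guard follows (hypothetical class name, not
// Elasticsearch code):
// ---------------------------------------------------------------------------
final class AssertionStatusSketch {
    public static void main(String[] args) {
        // Class#desiredAssertionStatus reports whether `assert` would be
        // enabled for this class (java -ea) when it is initialized.
        if (AssertionStatusSketch.class.desiredAssertionStatus() == false) {
            System.out.println("assertions disabled; a test would skip itself here");
            return;
        }
        boolean fired = false;
        try {
            assert false : "expected to fire under -ea";
        } catch (AssertionError expected) {
            fired = true;
        }
        System.out.println("assert fired: " + fired);   // true under -ea
    }
}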
ScriptType scriptType = randomFrom(ScriptType.values()); - builder.put("script" + "." + scriptType.getScriptType(), randomBoolean()); + builder.put("script" + "." + scriptType.getName(), randomBoolean()); } else { ScriptContext scriptContext = randomFrom(ScriptContext.Standard.values()); builder.put("script" + "." + scriptContext.getKey(), randomBoolean()); diff --git a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java index a56d056cd6b..f6a02ae9206 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java @@ -125,7 +125,7 @@ public class ScriptModesTests extends ESTestCase { ScriptType[] randomScriptTypes = randomScriptTypesSet.toArray(new ScriptType[randomScriptTypesSet.size()]); Settings.Builder builder = Settings.builder(); for (int i = 0; i < randomInt; i++) { - builder.put("script" + "." + randomScriptTypes[i].getScriptType(), randomScriptModes[i]); + builder.put("script" + "." + randomScriptTypes[i].getName(), randomScriptModes[i]); } this.scriptModes = new ScriptModes(scriptSettings, builder.build()); diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 8ed5f4c957d..2b14ba3f4d0 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -266,9 +266,9 @@ public class ScriptServiceTests extends ESTestCase { Settings.Builder builder = Settings.builder(); for (Map.Entry<ScriptType, Boolean> entry : scriptSourceSettings.entrySet()) { if (entry.getValue()) { - builder.put("script" + "." + entry.getKey().getScriptType(), "true"); + builder.put("script" + "." + entry.getKey().getName(), "true"); } else { - builder.put("script" + "." + entry.getKey().getScriptType(), "false"); + builder.put("script" + "." 
+ entry.getKey().getName(), "false"); } } for (Map.Entry<ScriptContext, Boolean> entry : scriptContextSettings.entrySet()) { diff --git a/core/src/test/java/org/elasticsearch/search/SearchCancellationIT.java b/core/src/test/java/org/elasticsearch/search/SearchCancellationIT.java index f9b344fc264..53653392e6e 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/core/src/test/java/org/elasticsearch/search/SearchCancellationIT.java @@ -28,8 +28,11 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ScriptPlugin; @@ -58,6 +61,9 @@ import static org.hamcrest.Matchers.hasSize; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) public class SearchCancellationIT extends ESIntegTestCase { + private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(ScriptedBlockPlugin.class); @@ -65,15 +71,17 @@ public class SearchCancellationIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(SearchService.LOW_LEVEL_CANCELLATION_SETTING.getKey(), randomBoolean()).build(); + boolean lowLevelCancellation = randomBoolean(); + logger.info("Using lowLevelCancellation: {}", lowLevelCancellation); + return Settings.builder().put(SearchService.LOW_LEVEL_CANCELLATION_SETTING.getKey(), lowLevelCancellation).build(); } private void indexTestData() { - for (int i = 0; i < 10; i++) { + for (int i = 0; i < 5; i++) { // Make sure we have a few segments BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - for(int j=0; j<10; j++) { - bulkRequestBuilder.add(client().prepareIndex("test", "type", Integer.toString(i*10 + j)).setSource("field", "value")); + for (int j = 0; j < 20; j++) { + bulkRequestBuilder.add(client().prepareIndex("test", "type", Integer.toString(i * 20 + j)).setSource("field", "value")); } assertNoFailures(bulkRequestBuilder.get()); } @@ -145,6 +153,7 @@ public class SearchCancellationIT extends ESIntegTestCase { awaitForBlock(plugins); cancelSearch(SearchAction.NAME); disableBlocks(plugins); + logger.info("Segments {}", XContentHelper.toString(client().admin().indices().prepareSegments("test").get(), FORMAT_PARAMS)); ensureSearchWasCancelled(searchResponse); } @@ -162,6 +171,7 @@ public class SearchCancellationIT extends ESIntegTestCase { awaitForBlock(plugins); cancelSearch(SearchAction.NAME); disableBlocks(plugins); + logger.info("Segments {}", XContentHelper.toString(client().admin().indices().prepareSegments("test").get(), FORMAT_PARAMS)); ensureSearchWasCancelled(searchResponse); } @@ -222,7 +232,7 @@ public class SearchCancellationIT extends ESIntegTestCase { disableBlocks(plugins); SearchResponse response = ensureSearchWasCancelled(scrollResponse); - if (response != null){ + if (response != null) { // The response didn't fail completely
- update scroll id scrollId = response.getScrollId(); } @@ -285,6 +295,7 @@ public class SearchCancellationIT extends ESIntegTestCase { public class NativeTestScriptedBlock extends AbstractSearchScript { @Override public Object run() { + Loggers.getLogger(SearchCancellationIT.class).info("Blocking on the document {}", doc().get("_uid")); hits.incrementAndGet(); try { awaitBusy(() -> shouldBlock.get() == false); diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java index d190f76b3fc..a3ecc66c030 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java @@ -1148,7 +1148,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { hasChildQuery( "child_type_one", boolQuery().must( - queryStringQuery("name:William*").analyzeWildcard(true) + queryStringQuery("name:William*") ), ScoreMode.None) ), @@ -1165,7 +1165,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { hasChildQuery( "child_type_two", boolQuery().must( - queryStringQuery("name:William*").analyzeWildcard(true) + queryStringQuery("name:William*") ), ScoreMode.None) ), diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 14cd65335f4..ac6bc9ab359 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.geo.GeoPoint; @@ -40,15 +41,19 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.search.MatchQuery; +import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder.Field; +import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.hamcrest.Matcher; import org.hamcrest.Matchers; +import org.joda.time.DateTime; +import org.joda.time.chrono.ISOChronology; import java.io.IOException; import java.util.Collection; @@ -84,6 +89,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHigh import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNotHighlighted; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static 
org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; @@ -2948,4 +2954,34 @@ public class HighlighterSearchIT extends ESIntegTestCase { equalTo("The quick brown fox jumps over the lazy dog")); } } + + public void testHighlightQueryRewriteDatesWithNow() throws Exception { + assertAcked(client().admin().indices().prepareCreate("index-1").addMapping("type", "d", "type=date", + "field", "type=text,store=true,term_vector=with_positions_offsets") + .setSettings("index.number_of_replicas", 0, "index.number_of_shards", 2) + .get()); + DateTime now = new DateTime(ISOChronology.getInstanceUTC()); + indexRandom(true, client().prepareIndex("index-1", "type", "1").setSource("d", now, "field", "hello world"), + client().prepareIndex("index-1", "type", "2").setSource("d", now.minusDays(1), "field", "hello"), + client().prepareIndex("index-1", "type", "3").setSource("d", now.minusDays(2), "field", "world")); + ensureSearchable("index-1"); + for (int i = 0; i < 5; i++) { + final SearchResponse r1 = client().prepareSearch("index-1") + .addSort("d", SortOrder.DESC) + .setTrackScores(true) + .highlighter(highlight() + .field("field") + .preTags("<x>") + .postTags("</x>") + ).setQuery(QueryBuilders.boolQuery().must( + QueryBuilders.rangeQuery("d").gte("now-7d/d").lte("now").includeLower(true).includeUpper(true).boost(1.0f)) + .should(QueryBuilders.termQuery("field", "hello"))) + .get(); + + assertSearchResponse(r1); + assertThat(r1.getHits().getTotalHits(), equalTo(3L)); + assertHighlight(r1, 0, "field", 0, 1, + equalTo("<x>hello</x> world")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index 8c501d71e0a..819f93fcc0f 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -190,7 +190,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { "ZXJtP4AAAAANbUtDSnpHU3lidm5KUBUMaVpqeG9vcm5QSFlvAAEBLGdtcWxuRWpWTXdvTlhMSHh0RWlFdHBnbEF1cUNmVmhoUVlwRFZxVllnWWV1A2ZvbwEA" + "AQhwYWlubGVzc/8AALk4AAAAAAABAAAAAAAAAwpKU09PU0ZmWnhFClVqTGxMa2p3V2gKdUJwZ3R3dXFER5Hg97uT7MOmPgEADw")); try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { - in.setVersion(ShardValidateQueryRequestTests.V_5_0_0); + in.setVersion(Version.V_5_0_0); ShardSearchTransportRequest readRequest = new ShardSearchTransportRequest(); readRequest.readFrom(in); assertEquals(0, in.available()); @@ -214,7 +214,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { .should(QueryBuilders.termQuery("foo", "bar2")) ); BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(ShardValidateQueryRequestTests.V_5_0_0); + output.setVersion(Version.V_5_0_0); readRequest.writeTo(output); assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); } diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 6ee6f56c536..1cb9d6508aa 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -480,19 +480,19 @@ public class SearchQueryIT extends ESIntegTestCase {
client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); - SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("value*").analyzeWildcard(true)).get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("value*")).get(); assertHitCount(searchResponse, 1L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("*ue*").analyzeWildcard(true)).get(); + searchResponse = client().prepareSearch().setQuery(queryStringQuery("*ue*")).get(); assertHitCount(searchResponse, 1L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("*ue_1").analyzeWildcard(true)).get(); + searchResponse = client().prepareSearch().setQuery(queryStringQuery("*ue_1")).get(); assertHitCount(searchResponse, 1L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("val*e_1").analyzeWildcard(true)).get(); + searchResponse = client().prepareSearch().setQuery(queryStringQuery("val*e_1")).get(); assertHitCount(searchResponse, 1L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("v?l*e?1").analyzeWildcard(true)).get(); + searchResponse = client().prepareSearch().setQuery(queryStringQuery("v?l*e?1")).get(); assertHitCount(searchResponse, 1L); } @@ -502,18 +502,14 @@ public class SearchQueryIT extends ESIntegTestCase { client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); - SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1").lowercaseExpandedTerms(true)).get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1")).get(); assertHitCount(searchResponse, 1L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1").lowercaseExpandedTerms(false)).get(); - assertHitCount(searchResponse, 0L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("ValUE_*").lowercaseExpandedTerms(true)).get(); + searchResponse = client().prepareSearch().setQuery(queryStringQuery("ValUE_*")).get(); assertHitCount(searchResponse, 1L); searchResponse = client().prepareSearch().setQuery(queryStringQuery("vAl*E_1")).get(); assertHitCount(searchResponse, 1L); searchResponse = client().prepareSearch().setQuery(queryStringQuery("[VALUE_1 TO VALUE_3]")).get(); assertHitCount(searchResponse, 1L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("[VALUE_1 TO VALUE_3]").lowercaseExpandedTerms(false)).get(); - assertHitCount(searchResponse, 0L); } // Issue #3540 @@ -532,11 +528,11 @@ public class SearchQueryIT extends ESIntegTestCase { SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")).get(); assertHitCount(searchResponse, 1L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("future:[now/d TO now+2M/d]").lowercaseExpandedTerms(false)).get(); + searchResponse = client().prepareSearch().setQuery(queryStringQuery("future:[now/d TO now+2M/d]")).get(); assertHitCount(searchResponse, 1L); SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch() - .setQuery(queryStringQuery("future:[now/D TO now+2M/d]").lowercaseExpandedTerms(false)).get()); + .setQuery(queryStringQuery("future:[now/D TO now+2M/d]")).get()); assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(e.toString(), containsString("unit [D] not supported for date 
math")); } diff --git a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index 9502a818315..b98bc5d43cd 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -30,12 +30,10 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; -import java.util.Locale; import java.util.concurrent.ExecutionException; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; -import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -158,49 +156,6 @@ public class SimpleQueryStringIT extends ESIntegTestCase { assertSearchHits(searchResponse, "6", "7", "8"); } - public void testSimpleQueryStringLowercasing() { - createIndex("test"); - client().prepareIndex("test", "type1", "1").setSource("body", "Professional").get(); - refresh(); - - SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("Professio*")).get(); - assertHitCount(searchResponse, 1L); - assertSearchHits(searchResponse, "1"); - - searchResponse = client().prepareSearch().setQuery( - simpleQueryStringQuery("Professio*").lowercaseExpandedTerms(false)).get(); - assertHitCount(searchResponse, 0L); - - searchResponse = client().prepareSearch().setQuery( - simpleQueryStringQuery("Professionan~1")).get(); - assertHitCount(searchResponse, 1L); - assertSearchHits(searchResponse, "1"); - - searchResponse = client().prepareSearch().setQuery( - simpleQueryStringQuery("Professionan~1").lowercaseExpandedTerms(false)).get(); - assertHitCount(searchResponse, 0L); - } - - public void testQueryStringLocale() { - createIndex("test"); - client().prepareIndex("test", "type1", "1").setSource("body", "bılly").get(); - refresh(); - - SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("BILL*")).get(); - assertHitCount(searchResponse, 0L); - searchResponse = client().prepareSearch().setQuery(queryStringQuery("body:BILL*")).get(); - assertHitCount(searchResponse, 0L); - - searchResponse = client().prepareSearch().setQuery( - simpleQueryStringQuery("BILL*").locale(new Locale("tr", "TR"))).get(); - assertHitCount(searchResponse, 1L); - assertSearchHits(searchResponse, "1"); - searchResponse = client().prepareSearch().setQuery( - queryStringQuery("body:BILL*").locale(new Locale("tr", "TR"))).get(); - assertHitCount(searchResponse, 1L); - assertSearchHits(searchResponse, "1"); - } - public void testNestedFieldSimpleQueryString() throws IOException { assertAcked(prepareCreate("test") .addMapping("type1", jsonBuilder() @@ -342,7 +297,7 @@ public class SimpleQueryStringIT extends ESIntegTestCase { refresh(); SearchResponse searchResponse = client().prepareSearch() - .setQuery(simpleQueryStringQuery("Köln*").analyzeWildcard(true).field("location")).get(); + .setQuery(simpleQueryStringQuery("Köln*").field("location")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, 1L); assertSearchHits(searchResponse, "1"); @@ -393,7 +348,7 @@ public class 
SimpleQueryStringIT extends ESIntegTestCase { refresh(); SearchResponse searchResponse = client().prepareSearch() - .setQuery(simpleQueryStringQuery("the*").analyzeWildcard(true).field("body")).get(); + .setQuery(simpleQueryStringQuery("the*").field("body")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, 0L); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index b7ce99e6ea3..0c8dbd4c42c 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -34,10 +34,14 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotIndexStat import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.Client; @@ -54,6 +58,8 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -64,10 +70,16 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexNameException; +import org.elasticsearch.ingest.IngestTestPlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.StoredScriptsIT; +import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.junit.annotations.TestLogging; import java.nio.channels.SeekableByteChannel; @@ -76,6 +88,7 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -86,7 +99,9 @@ import java.util.stream.Collectors; import static 
org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAliasesExist; @@ -109,6 +124,14 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(IngestTestPlugin.class, + StoredScriptsIT.CustomScriptPlugin.class, + MockRepository.Plugin.class); + } + public void testBasicWorkFlow() throws Exception { Client client = client(); @@ -459,11 +482,39 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertAcked(client.admin().cluster().preparePutRepository("test-repo") .setType("fs").setSettings(Settings.builder().put("location", location))); - logger.info("--> creating test template"); - assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", XContentFactory.jsonBuilder().startObject().startObject("test-mapping").startObject("properties") .startObject("field1").field("type", "string").field("store", "yes").endObject() .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject() .endObject().endObject().endObject()).get().isAcknowledged(), equalTo(true)); + boolean testTemplate = randomBoolean(); + boolean testPipeline = randomBoolean(); + boolean testScript = (testTemplate == false && testPipeline == false) || randomBoolean(); // At least something should be stored + + if(testTemplate) { + logger.info("--> creating test template"); + assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", XContentFactory.jsonBuilder().startObject().startObject("test-mapping").startObject("properties") + .startObject("field1").field("type", "string").field("store", "yes").endObject() + .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject() + .endObject().endObject().endObject()).get().isAcknowledged(), equalTo(true)); + } + + if(testPipeline) { + logger.info("--> creating test pipeline"); + BytesReference pipelineSource = jsonBuilder().startObject() + .field("description", "my_pipeline") + .startArray("processors") + .startObject() + .startObject("test") + .endObject() + .endObject() + .endArray() + .endObject().bytes(); + assertAcked(client().admin().cluster().preparePutPipeline("barbaz", pipelineSource).get()); + } + + if(testScript) { + logger.info("--> creating test script"); + assertAcked(client().admin().cluster().preparePutStoredScript() + .setScriptLang(MockScriptEngine.NAME) + .setId("foobar") + .setSource(new BytesArray("{\"script\":\"1\"}"))); + } logger.info("--> snapshot without global state"); CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo",
"test-snap-no-global-state").setIndices().setIncludeGlobalState(false).setWaitForCompletion(true).get(); @@ -477,26 +528,52 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-with-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); - logger.info("--> delete test template"); - cluster().wipeTemplates("test-template"); - GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); - assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); + if (testTemplate) { + logger.info("--> delete test template"); + cluster().wipeTemplates("test-template"); + GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); + assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); + } + + if (testPipeline) { + logger.info("--> delete test pipeline"); + assertAcked(client().admin().cluster().deletePipeline(new DeletePipelineRequest("barbaz")).get()); + } + + if (testScript) { + logger.info("--> delete test script"); + assertAcked(client().admin().cluster().prepareDeleteStoredScript(MockScriptEngine.NAME, "foobar").get()); + } logger.info("--> try restoring cluster state from snapshot without global state"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); logger.info("--> check that template wasn't restored"); - getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); + GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> restore cluster state"); restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); - logger.info("--> check that template is restored"); - getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); - assertIndexTemplateExists(getIndexTemplatesResponse, "test-template"); + if (testTemplate) { + logger.info("--> check that template is restored"); + getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); + assertIndexTemplateExists(getIndexTemplatesResponse, "test-template"); + } + + if (testPipeline) { + logger.info("--> check that pipeline is restored"); + GetPipelineResponse getPipelineResponse = client().admin().cluster().prepareGetPipeline("barbaz").get(); + assertTrue(getPipelineResponse.isFound()); + } + + if (testScript) { + logger.info("--> check that script is restored"); + GetStoredScriptResponse getStoredScriptResponse = client().admin().cluster().prepareGetStoredScript(MockScriptEngine.NAME, "foobar").get(); + assertNotNull(getStoredScriptResponse.getStoredScript()); + } createIndex("test-idx"); ensureGreen(); @@ -514,9 +591,19 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas 
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state-with-index").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); - logger.info("--> delete test template and index "); + logger.info("--> delete global state and index "); cluster().wipeIndices("test-idx"); - cluster().wipeTemplates("test-template"); + if (testTemplate) { + cluster().wipeTemplates("test-template"); + } + if (testPipeline) { + assertAcked(client().admin().cluster().deletePipeline(new DeletePipelineRequest("barbaz")).get()); + } + + if (testScript) { + assertAcked(client().admin().cluster().prepareDeleteStoredScript(MockScriptEngine.NAME, "foobar").get()); + } + getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); @@ -525,9 +612,11 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); - logger.info("--> check that template wasn't restored but index was"); + logger.info("--> check that global state wasn't restored but index was"); getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); + assertFalse(client().admin().cluster().prepareGetPipeline("barbaz").get().isFound()); + assertNull(client().admin().cluster().prepareGetStoredScript(MockScriptEngine.NAME, "foobar").get().getStoredScript()); assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().totalHits(), equalTo(100L)); } diff --git a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java index eeebe8cbcdc..e18ba0fe322 100644 --- a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java +++ b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java @@ -19,7 +19,7 @@ package org.elasticsearch.test.geo; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.vividsolutions.jts.algorithm.ConvexHull; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; @@ -61,7 +61,7 @@ public class RandomShapeGenerator extends RandomGeoGenerator { private static final ShapeType[] types = values(); public static ShapeType randomType(Random r) { - return types[RandomInts.randomIntBetween(r, 0, types.length - 1)]; + return types[RandomNumbers.randomIntBetween(r, 0, types.length - 1)]; } } @@ -115,7 +115,7 @@ public class RandomShapeGenerator extends RandomGeoGenerator { throws InvalidShapeException { if (numGeometries <= 0) { // cap geometry collection at 4 shapes (to save test time) - numGeometries = RandomInts.randomIntBetween(r, 2, 4); + numGeometries = RandomNumbers.randomIntBetween(r, 2, 4); } if (nearPoint == null) { @@ -187,7 +187,7 @@ public class RandomShapeGenerator extends RandomGeoGenerator { // for random testing having a maximum number of 10 points for a line string is more than sufficient // if this number gets out of hand, the number of self intersections for a linestring can become // (n^2-n)/2 and computing the 
relation intersection matrix will become NP-Hard - int numPoints = RandomInts.randomIntBetween(r, 3, 10); + int numPoints = RandomNumbers.randomIntBetween(r, 3, 10); CoordinatesBuilder coordinatesBuilder = new CoordinatesBuilder(); for (int i=0; i<numPoints; i++) { ... } ... private static boolean rarely(Random r) { - return RandomInts.randomInt(r, 100) >= 90; + return r.nextInt(100) >= 90; } private static Range xRandomRange(Random r, double near, Range bounds) { diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index 0bc4974f285..6121b2c0c86 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -19,6 +19,18 @@ package org.elasticsearch.tribe; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.client.Client; @@ -46,18 +58,6 @@ import org.junit.After; import org.junit.AfterClass; import org.junit.Before; -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.function.Consumer; -import java.util.function.Function; -import java.util.function.Predicate; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; - import static java.util.stream.Collectors.toSet; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; diff --git a/core/src/test/resources/indices/bwc/index-5.0.0.zip b/core/src/test/resources/indices/bwc/index-5.0.0.zip new file mode 100644 index 00000000000..422b2587760 Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-5.0.0.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-5.0.0.zip b/core/src/test/resources/indices/bwc/repo-5.0.0.zip new file mode 100644 index 00000000000..7145bb1ceec Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-5.0.0.zip differ diff --git a/dev-tools/create_bwc_index.py b/dev-tools/create_bwc_index.py index 5a262d23cb6..2e6b92d8968 100644 --- a/dev-tools/create_bwc_index.py +++ b/dev-tools/create_bwc_index.py @@ -265,12 +265,20 @@ def generate_index(client, version, index_name): mappings['doc'] = {'properties' : {}} supports_dots_in_field_names = parse_version(version) >= parse_version("2.4.0") if supports_dots_in_field_names: - mappings["doc"]['properties'].update({ + + if parse_version(version) < parse_version("5.0.0-alpha1"): + mappings["doc"]['properties'].update({ 'field.with.dots': { 'type': 'string', 'boost': 4 } }) + else: + mappings["doc"]['properties'].update({ + 'field.with.dots': { + 'type': 'text' + } + }) if parse_version(version) < parse_version("5.0.0-alpha1"): mappings['norms'] = { @@ -339,7 +347,10 @@ def generate_index(client, version, index_name): if warmers: body['warmers'] = warmers client.indices.create(index=index_name, body=body) - health =
client.cluster.health(wait_for_status='green', wait_for_relocating_shards=0) + else: + health = client.cluster.health(wait_for_status='green', wait_for_no_relocating_shards=True) assert health['timed_out'] == False, 'cluster health timed out %s' % health num_docs = random.randint(2000, 3000) diff --git a/dev-tools/get-bwc-version.py b/dev-tools/get-bwc-version.py index 54c559d1dc8..4ef9736ea06 100644 --- a/dev-tools/get-bwc-version.py +++ b/dev-tools/get-bwc-version.py @@ -9,7 +9,7 @@ # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on +# software distributed under the License is distributed on # an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. @@ -38,7 +38,7 @@ def parse_config(): def main(): c = parse_config() - + if not os.path.exists(c.path): print('Creating %s' % c.path) os.mkdir(c.path) @@ -53,7 +53,7 @@ def main(): shutil.rmtree(version_dir) else: print('Version %s exists at %s' % (c.version, version_dir)) - return + return # before 1.4.0, the zip file contains windows scripts, and tar.gz contained *nix scripts if is_windows: @@ -67,14 +67,14 @@ def main(): elif c.version.startswith('0.') or c.version.startswith('1.'): url = 'https://download.elasticsearch.org/elasticsearch/elasticsearch/%s' % filename else: - url = 'http://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/%s/%s' % (c.version, filename) + url = 'https://artifacts.elastic.co/downloads/elasticsearch/%s' % filename print('Downloading %s' % url) urllib.request.urlretrieve(url, filename) print('Extracting to %s' % version_dir) if is_windows: archive = zipfile.ZipFile(filename) - archive.extractall() + archive.extractall() else: # for some reason python's tarfile module has trouble with ES tgz? 
subprocess.check_call('tar -xzf %s' % filename, shell=True) diff --git a/distribution/build.gradle b/distribution/build.gradle index 42b696d9cce..cc4912888da 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -196,6 +196,13 @@ subprojects { * Zip and tgz configuration * *****************************************************************************/ configure(subprojects.findAll { ['zip', 'tar', 'integ-test-zip'].contains(it.name) }) { + // CopySpec does not make it easy to create an empty directory so we create the directory that we want, and then point CopySpec to its + // parent to copy to the root of the distribution + File plugins = new File(buildDir, 'plugins-hack/plugins') + task createPluginsDir(type: EmptyDirTask) { + dir "${plugins}" + dirMode 0755 + } project.ext.archivesFiles = copySpec { into("elasticsearch-${version}") { with libFiles @@ -214,6 +221,11 @@ configure(subprojects.findAll { ['zip', 'tar', 'integ-test-zip'].contains(it.nam MavenFilteringHack.filter(it, expansions) } } + into('') { + from { + plugins.getParent() + } + } with commonFiles from('../src/main/resources') { include 'bin/*.exe' diff --git a/distribution/integ-test-zip/build.gradle b/distribution/integ-test-zip/build.gradle index ae4a499efd8..80da4131995 100644 --- a/distribution/integ-test-zip/build.gradle +++ b/distribution/integ-test-zip/build.gradle @@ -20,6 +20,7 @@ import org.elasticsearch.gradle.plugin.PluginBuildPlugin task buildZip(type: Zip) { + dependsOn createPluginsDir baseName = 'elasticsearch' with archivesFiles } diff --git a/distribution/licenses/lucene-analyzers-common-6.2.0.jar.sha1 b/distribution/licenses/lucene-analyzers-common-6.2.0.jar.sha1 deleted file mode 100644 index 57aec3f4ac2..00000000000 --- a/distribution/licenses/lucene-analyzers-common-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d254d52dd394b5079129f3d5f3bf4f2d44a5936e \ No newline at end of file diff --git a/distribution/licenses/lucene-analyzers-common-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-analyzers-common-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..1626a88f4a2 --- /dev/null +++ b/distribution/licenses/lucene-analyzers-common-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +61aacb657e44a9beabf95834e106bbb96373a703 \ No newline at end of file diff --git a/distribution/licenses/lucene-backward-codecs-6.2.0.jar.sha1 b/distribution/licenses/lucene-backward-codecs-6.2.0.jar.sha1 deleted file mode 100644 index 04aefc62f61..00000000000 --- a/distribution/licenses/lucene-backward-codecs-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b625bb21456b3c0d1e5e431bced125cb060c1abd \ No newline at end of file diff --git a/distribution/licenses/lucene-backward-codecs-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-backward-codecs-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..2f45d50eeee --- /dev/null +++ b/distribution/licenses/lucene-backward-codecs-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +600de75a81e259cab0384e546d9a1d527ddba6d6 \ No newline at end of file diff --git a/distribution/licenses/lucene-core-6.2.0.jar.sha1 b/distribution/licenses/lucene-core-6.2.0.jar.sha1 deleted file mode 100644 index 2d74124e624..00000000000 --- a/distribution/licenses/lucene-core-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -849ee62525a294416802be2cacc66c80352f6f13 \ No newline at end of file diff --git a/distribution/licenses/lucene-core-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-core-6.3.0-snapshot-a66a445.jar.sha1 new
file mode 100644 index 00000000000..9dcdbeb40e9 --- /dev/null +++ b/distribution/licenses/lucene-core-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +188774468a56a8731ca639527d721060d26ffebd \ No newline at end of file diff --git a/distribution/licenses/lucene-grouping-6.2.0.jar.sha1 b/distribution/licenses/lucene-grouping-6.2.0.jar.sha1 deleted file mode 100644 index 6ba525a038f..00000000000 --- a/distribution/licenses/lucene-grouping-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9527fedfd5acc624b2bb3f862bd99fb8f470b053 \ No newline at end of file diff --git a/distribution/licenses/lucene-grouping-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-grouping-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..14c8d7aa2b7 --- /dev/null +++ b/distribution/licenses/lucene-grouping-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +5afd9271e3d8f645440f48ff2487545ae5573e7e \ No newline at end of file diff --git a/distribution/licenses/lucene-highlighter-6.2.0.jar.sha1 b/distribution/licenses/lucene-highlighter-6.2.0.jar.sha1 deleted file mode 100644 index c258e3fb850..00000000000 --- a/distribution/licenses/lucene-highlighter-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ca342372a3f45e32bbd21cecaa757e38eccb8a5 \ No newline at end of file diff --git a/distribution/licenses/lucene-highlighter-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-highlighter-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..e695284756d --- /dev/null +++ b/distribution/licenses/lucene-highlighter-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +0f575175e26d4d3b1095f6300cbefbbb3ee994cd \ No newline at end of file diff --git a/distribution/licenses/lucene-join-6.2.0.jar.sha1 b/distribution/licenses/lucene-join-6.2.0.jar.sha1 deleted file mode 100644 index 01989e96a58..00000000000 --- a/distribution/licenses/lucene-join-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da0b8de98511abd4fe9c7d48a353d17854c5ed46 \ No newline at end of file diff --git a/distribution/licenses/lucene-join-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-join-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..ad02b0cac3b --- /dev/null +++ b/distribution/licenses/lucene-join-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +ee898c3d318681c9f29c56e6d9b52876be96d814 \ No newline at end of file diff --git a/distribution/licenses/lucene-memory-6.2.0.jar.sha1 b/distribution/licenses/lucene-memory-6.2.0.jar.sha1 deleted file mode 100644 index b8a4a87efe2..00000000000 --- a/distribution/licenses/lucene-memory-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bc9e075b1ee051c8e5246c237c38d8e71dab8a66 \ No newline at end of file diff --git a/distribution/licenses/lucene-memory-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-memory-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..0e36d650670 --- /dev/null +++ b/distribution/licenses/lucene-memory-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +ea6defd322456711394b4dabcda70a217e3caacd \ No newline at end of file diff --git a/distribution/licenses/lucene-misc-6.2.0.jar.sha1 b/distribution/licenses/lucene-misc-6.2.0.jar.sha1 deleted file mode 100644 index f4e081865ad..00000000000 --- a/distribution/licenses/lucene-misc-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -94ddde6312566a4da4a50a88e453b6c82c759b41 \ No newline at end of file diff --git a/distribution/licenses/lucene-misc-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-misc-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 
index 00000000000..e458570651a --- /dev/null +++ b/distribution/licenses/lucene-misc-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +ea2de7f9753a8e19a1ec9f25a3ea65d7ce909a0e \ No newline at end of file diff --git a/distribution/licenses/lucene-queries-6.2.0.jar.sha1 b/distribution/licenses/lucene-queries-6.2.0.jar.sha1 deleted file mode 100644 index f7270a23afe..00000000000 --- a/distribution/licenses/lucene-queries-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dce47238f78e3e97d91dc6fefa9f46f07866bc2b \ No newline at end of file diff --git a/distribution/licenses/lucene-queries-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-queries-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..1231424e3be --- /dev/null +++ b/distribution/licenses/lucene-queries-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +0b15c6f29bfb9ec14a4615013a94bfa43a63793d \ No newline at end of file diff --git a/distribution/licenses/lucene-queryparser-6.2.0.jar.sha1 b/distribution/licenses/lucene-queryparser-6.2.0.jar.sha1 deleted file mode 100644 index 8e95aa600ec..00000000000 --- a/distribution/licenses/lucene-queryparser-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -17ef728ac15e668bfa1105321611548424637645 \ No newline at end of file diff --git a/distribution/licenses/lucene-queryparser-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-queryparser-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..a367f4e45cf --- /dev/null +++ b/distribution/licenses/lucene-queryparser-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +d89d9fa1036c38144e0b8db079ae959353847c86 \ No newline at end of file diff --git a/distribution/licenses/lucene-sandbox-6.2.0.jar.sha1 b/distribution/licenses/lucene-sandbox-6.2.0.jar.sha1 deleted file mode 100644 index 1f34be3033d..00000000000 --- a/distribution/licenses/lucene-sandbox-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -520183f7b9aba77a26e224760c420a3844b0631a \ No newline at end of file diff --git a/distribution/licenses/lucene-sandbox-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-sandbox-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..4c8874c0b4b --- /dev/null +++ b/distribution/licenses/lucene-sandbox-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +c003c1ab0a19a02b30156ce13372cff1001d6a7d \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-6.2.0.jar.sha1 b/distribution/licenses/lucene-spatial-6.2.0.jar.sha1 deleted file mode 100644 index 22e81792e40..00000000000 --- a/distribution/licenses/lucene-spatial-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8dba929b66927b936fbc76103b109ad6c824daee \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-spatial-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..75dd8263828 --- /dev/null +++ b/distribution/licenses/lucene-spatial-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +a3c570bf588d7c9ca43d074db9ce9c9b8408b930 \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-extras-6.2.0.jar.sha1 b/distribution/licenses/lucene-spatial-extras-6.2.0.jar.sha1 deleted file mode 100644 index d5e8f379d78..00000000000 --- a/distribution/licenses/lucene-spatial-extras-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3b5a6ef5cd90c0218a72e9e2f7e60104be2447da \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial-extras-6.3.0-snapshot-a66a445.jar.sha1 
b/distribution/licenses/lucene-spatial-extras-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..debd8e0b873 --- /dev/null +++ b/distribution/licenses/lucene-spatial-extras-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +de54ca61f5892cf2c88ac083b3332a827beca7ff \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial3d-6.2.0.jar.sha1 b/distribution/licenses/lucene-spatial3d-6.2.0.jar.sha1 deleted file mode 100644 index d0ce5275a26..00000000000 --- a/distribution/licenses/lucene-spatial3d-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fcdb0567725722c5145149d1502848b6a96ec18d \ No newline at end of file diff --git a/distribution/licenses/lucene-spatial3d-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-spatial3d-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..b9eb9a0c270 --- /dev/null +++ b/distribution/licenses/lucene-spatial3d-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +cacdf81b324acd335be63798d5a3dd16e7dff9a3 \ No newline at end of file diff --git a/distribution/licenses/lucene-suggest-6.2.0.jar.sha1 b/distribution/licenses/lucene-suggest-6.2.0.jar.sha1 deleted file mode 100644 index 39392ad1158..00000000000 --- a/distribution/licenses/lucene-suggest-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d9d526c51f483d27f425c75d7e56bc0849242d6 \ No newline at end of file diff --git a/distribution/licenses/lucene-suggest-6.3.0-snapshot-a66a445.jar.sha1 b/distribution/licenses/lucene-suggest-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..a6517bc7d42 --- /dev/null +++ b/distribution/licenses/lucene-suggest-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +a5cb3723bc8e0db185fc43e57b648145de27fde8 \ No newline at end of file diff --git a/distribution/src/main/resources/bin/elasticsearch-service.bat b/distribution/src/main/resources/bin/elasticsearch-service.bat index 609b8bda846..f2aa5c3e3de 100644 --- a/distribution/src/main/resources/bin/elasticsearch-service.bat +++ b/distribution/src/main/resources/bin/elasticsearch-service.bat @@ -209,15 +209,15 @@ for %%a in ("%ES_JAVA_OPTS:;=","%") do ( @endlocal & set JVM_MS=%JVM_MS% & set JVM_MX=%JVM_MX% & set JVM_SS=%JVM_SS% if "%JVM_MS%" == "" ( - echo minimum heap size not set; configure via %ES_JVM_OPTIONS% or ES_JAVA_OPTS + echo minimum heap size not set; configure using -Xms via %ES_JVM_OPTIONS% or ES_JAVA_OPTS goto:eof ) if "%JVM_MX%" == "" ( - echo maximum heap size not set; configure via %ES_JVM_OPTIONS% or ES_JAVA_OPTS + echo maximum heap size not set; configure using -Xmx via %ES_JVM_OPTIONS% or ES_JAVA_OPTS goto:eof ) if "%JVM_SS%" == "" ( - echo thread stack size not set; configure via %ES_JVM_OPTIONS% or ES_JAVA_OPTS + echo thread stack size not set; configure using -Xss via %ES_JVM_OPTIONS% or ES_JAVA_OPTS goto:eof ) diff --git a/distribution/tar/build.gradle b/distribution/tar/build.gradle index 8e34a6ce9ce..2140061ee48 100644 --- a/distribution/tar/build.gradle +++ b/distribution/tar/build.gradle @@ -18,6 +18,7 @@ */ task buildTar(type: Tar) { + dependsOn createPluginsDir baseName = 'elasticsearch' extension = 'tar.gz' with archivesFiles diff --git a/distribution/zip/build.gradle b/distribution/zip/build.gradle index ae4a499efd8..80da4131995 100644 --- a/distribution/zip/build.gradle +++ b/distribution/zip/build.gradle @@ -20,6 +20,7 @@ import org.elasticsearch.gradle.plugin.PluginBuildPlugin task buildZip(type: Zip) { + dependsOn createPluginsDir baseName = 'elasticsearch' with archivesFiles } diff --git a/docs/java-api/search.asciidoc 
b/docs/java-api/search.asciidoc index f5f20b30a9c..2da24e93c22 100644 --- a/docs/java-api/search.asciidoc +++ b/docs/java-api/search.asciidoc @@ -58,7 +58,7 @@ SearchResponse scrollResp = client.prepareSearch(test) .addSort(FieldSortBuilder.DOC_FIELD_NAME, SortOrder.ASC) .setScroll(new TimeValue(60000)) .setQuery(qb) - .setSize(100).execute().actionGet(); //100 hits per shard will be returned for each scroll + .setSize(100).execute().actionGet(); //max of 100 hits will be returned for each scroll //Scroll until no hits are returned do { for (SearchHit hit : scrollResp.getHits().getHits()) { @@ -68,10 +68,6 @@ do { scrollResp = client.prepareSearchScroll(scrollResp.getScrollId()).setScroll(new TimeValue(60000)).execute().actionGet(); } while(scrollResp.getHits().getHits().length != 0); // Zero hits mark the end of the scroll and the while loop. -------------------------------------------------- -[NOTE] -==== -The size-parameter is per shard, so if you run a query against multiple indices (leading to many shards being involved in the query) the result might be more documents per execution of the scroll than you would expect! -==== [[java-search-msearch]] === MultiSearch API diff --git a/docs/plugins/alerting.asciidoc b/docs/plugins/alerting.asciidoc index 9472dbb6382..1e365306a84 100644 --- a/docs/plugins/alerting.asciidoc +++ b/docs/plugins/alerting.asciidoc @@ -8,11 +8,10 @@ Alerting plugins allow Elasticsearch to monitor indices and to trigger alerts wh The core alerting plugins are: -link:/products/watcher[Watcher]:: +link:/products/x-pack/alerting[X-Pack]:: -Watcher is the alerting and notification product for Elasticsearch that lets -you take action based on changes in your data. It is designed around the +X-Pack contains the alerting and notification product for Elasticsearch that +lets you take action based on changes in your data. It is designed around the principle that if you can query something in Elasticsearch, you can alert on it. Simply define a query, condition, schedule, and the actions to take, and -Watcher will do the rest. - +X-Pack will do the rest. diff --git a/docs/plugins/analysis-ukrainian.asciidoc b/docs/plugins/analysis-ukrainian.asciidoc new file mode 100644 index 00000000000..78f8232f1c1 --- /dev/null +++ b/docs/plugins/analysis-ukrainian.asciidoc @@ -0,0 +1,42 @@ +[[analysis-ukrainian]] +=== Ukrainian Analysis Plugin + +The Ukrainian Analysis plugin integrates Lucene's UkrainianMorfologikAnalyzer into elasticsearch. + +It provides stemming for Ukrainian using the http://github.com/morfologik/morfologik-stemming[Morfologik project]. + +[[analysis-ukrainian-install]] +[float] +==== Installation + +This plugin can be installed using the plugin manager: + +[source,sh] +---------------------------------------------------------------- +sudo bin/elasticsearch-plugin install analysis-ukrainian +---------------------------------------------------------------- + +The plugin must be installed on every node in the cluster, and each node must +be restarted after installation. + +This plugin can be downloaded for <<plugin-management-custom-url,offline install>> from +{plugin_url}/analysis-ukrainian/analysis-ukrainian-{version}.zip. + +[[analysis-ukrainian-remove]] +[float] +==== Removal + +The plugin can be removed with the following command: + +[source,sh] +---------------------------------------------------------------- +sudo bin/elasticsearch-plugin remove analysis-ukrainian +---------------------------------------------------------------- + +The node must be stopped before removing the plugin.
+ +[[analysis-ukrainian-analyzer]] +[float] +==== `ukrainian` analyzer + +The plugin provides the `ukrainian` analyzer. diff --git a/docs/plugins/analysis.asciidoc b/docs/plugins/analysis.asciidoc index 884dc2aebae..3c3df021de5 100644 --- a/docs/plugins/analysis.asciidoc +++ b/docs/plugins/analysis.asciidoc @@ -36,16 +36,18 @@ segmented into words. Provides high quality stemming for Polish. +<<analysis-ukrainian,Ukrainian>>:: + +Provides stemming for Ukrainian. + [float] ==== Community contributed analysis plugins A number of analysis plugins have been contributed by our community: -* https://github.com/yakaz/elasticsearch-analysis-combo/[Combo Analysis Plugin] (by Olivier Favre, Yakaz) * https://github.com/synhershko/elasticsearch-analysis-hebrew[Hebrew Analysis Plugin] (by Itamar Syn-Hershko) * https://github.com/medcl/elasticsearch-analysis-ik[IK Analysis Plugin] (by Medcl) * https://github.com/medcl/elasticsearch-analysis-mmseg[Mmseg Analysis Plugin] (by Medcl) -* https://github.com/chytreg/elasticsearch-analysis-morfologik[Morfologik (Polish) Analysis plugin] (by chytreg) * https://github.com/imotov/elasticsearch-analysis-morphology[Russian and English Morphological Analysis Plugin] (by Igor Motov) * https://github.com/medcl/elasticsearch-analysis-pinyin[Pinyin Analysis Plugin] (by Medcl) * https://github.com/duydo/elasticsearch-analysis-vietnamese[Vietnamese Analysis Plugin] (by Duy Do) @@ -62,5 +64,4 @@ include::analysis-smartcn.asciidoc[] include::analysis-stempel.asciidoc[] - - +include::analysis-ukrainian.asciidoc[] diff --git a/docs/plugins/api.asciidoc b/docs/plugins/api.asciidoc index 54edcbc7f0e..a2fbc5165ac 100644 --- a/docs/plugins/api.asciidoc +++ b/docs/plugins/api.asciidoc @@ -14,10 +14,6 @@ A number of plugins have been contributed by our community: * https://github.com/wikimedia/search-extra[Elasticsearch Trigram Accelerated Regular Expression Filter]: (by Wikimedia Foundation/Nik Everett) -* https://github.com/kzwang/elasticsearch-image[Elasticsearch Image Plugin]: - Uses https://code.google.com/p/lire/[Lire (Lucene Image Retrieval)] to allow users - to index images and search for similar images (by Kevin Wang) - * https://github.com/wikimedia/search-highlighter[Elasticsearch Experimental Highlighter]: (by Wikimedia Foundation/Nik Everett) @@ -30,7 +26,4 @@ A number of plugins have been contributed by our community: * https://github.com/codelibs/elasticsearch-taste[Elasticsearch Taste Plugin]: Mahout Taste-based Collaborative Filtering implementation (by CodeLibs Project) -* https://github.com/hadashiA/elasticsearch-flavor[Elasticsearch Flavor Plugin] using - http://mahout.apache.org/[Mahout] Collaboration filtering (by hadashiA) * https://github.com/jurgc11/es-change-feed-plugin[WebSocket Change Feed Plugin] (by ForgeRock/Chris Clifton) - diff --git a/docs/plugins/discovery-file.asciidoc b/docs/plugins/discovery-file.asciidoc index a848cdd6ff1..e8e1e42f867 100644 --- a/docs/plugins/discovery-file.asciidoc +++ b/docs/plugins/discovery-file.asciidoc @@ -89,5 +89,5 @@ running on the default port: ---------------------------------------------------------------- Host names are allowed instead of IP addresses (similar to -`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be +`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be specified in brackets with the port coming after the brackets.
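
As an aside on the discovery-file hunk above: the `unicast_hosts.txt` format it documents (one `host[:port]` entry per line, IPv6 addresses in brackets with the port after the closing bracket) is easy to sanity-check mechanically. The sketch below is purely illustrative; the class and method names are invented for this example and are not part of the plugin:

[source,java]
--------------------------------------------------
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hypothetical helper mirroring the unicast_hosts.txt rules described above:
// one entry per line, optional :port suffix, IPv6 addresses wrapped in brackets.
public class UnicastHostsFileSketch {

    static List<String> parse(List<String> lines) {
        List<String> hosts = new ArrayList<>();
        for (String line : lines) {
            String entry = line.trim();
            if (entry.isEmpty() || entry.startsWith("#")) {
                continue; // skip blank lines and comments
            }
            if (entry.startsWith("[") && entry.indexOf(']') < 0) {
                // IPv6 entries must close the bracket before any port suffix
                throw new IllegalArgumentException("unclosed bracket in: " + entry);
            }
            hosts.add(entry);
        }
        return hosts;
    }

    public static void main(String[] args) {
        System.out.println(parse(Arrays.asList(
                "10.0.0.1",           // bare IPv4, default port
                "10.0.0.2:9305",      // explicit port
                "[2001:db8::1]:9301", // IPv6, port after the brackets
                "# a comment")));
    }
}
--------------------------------------------------
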
diff --git a/docs/plugins/discovery.asciidoc b/docs/plugins/discovery.asciidoc index 96a1c1e7b28..39afbea96dc 100644 --- a/docs/plugins/discovery.asciidoc +++ b/docs/plugins/discovery.asciidoc @@ -30,9 +30,7 @@ The File-based discovery plugin allows providing the unicast hosts list through A number of discovery plugins have been contributed by our community: -* https://github.com/grantr/elasticsearch-srv-discovery[DNS SRV Discovery Plugin] (by Grant Rodgers) * https://github.com/shikhar/eskka[eskka Discovery Plugin] (by Shikhar Bhushan) -* https://github.com/grmblfrz/elasticsearch-zookeeper[ZooKeeper Discovery Plugin] (by Sonian Inc.) * https://github.com/fabric8io/elasticsearch-cloud-kubernetes[Kubernetes Discovery Plugin] (by Jimmi Dyson, http://fabric8.io[fabric8]) include::discovery-ec2.asciidoc[] @@ -42,4 +40,3 @@ include::discovery-azure-classic.asciidoc[] include::discovery-gce.asciidoc[] include::discovery-file.asciidoc[] - diff --git a/docs/plugins/index.asciidoc b/docs/plugins/index.asciidoc index 8b1c3dd7726..ec1954a86a8 100644 --- a/docs/plugins/index.asciidoc +++ b/docs/plugins/index.asciidoc @@ -3,7 +3,7 @@ :ref: https://www.elastic.co/guide/en/elasticsearch/reference/master :guide: https://www.elastic.co/guide :version: 6.0.0-alpha1 -:lucene_version: 6.2.0 +:lucene_version: 6.3.0 :plugin_url: https://artifacts.elastic.co/downloads/elasticsearch-plugins [[intro]] diff --git a/docs/plugins/integrations.asciidoc b/docs/plugins/integrations.asciidoc index d39d02efeac..503f1274d81 100644 --- a/docs/plugins/integrations.asciidoc +++ b/docs/plugins/integrations.asciidoc @@ -201,4 +201,3 @@ These projects appear to have been abandoned: D3. * https://github.com/OlegKunitsyn/eslogd[eslogd]: Linux daemon that replicates events to a central Elasticsearch server in realtime ->>>>>>> 02602a3... Update integrations.asciidoc (#18915) diff --git a/docs/plugins/lang-javascript.asciidoc b/docs/plugins/lang-javascript.asciidoc index 64be9dbb033..001b7f22231 100644 --- a/docs/plugins/lang-javascript.asciidoc +++ b/docs/plugins/lang-javascript.asciidoc @@ -120,7 +120,7 @@ GET test/_search "function_score": { "script_score": { "script": { - "id": "my_script", <2> + "stored": "my_script", <2> "lang": "javascript", "params": { "factor": 2 diff --git a/docs/plugins/lang-python.asciidoc b/docs/plugins/lang-python.asciidoc index 0730f8b54b7..0e328d79f60 100644 --- a/docs/plugins/lang-python.asciidoc +++ b/docs/plugins/lang-python.asciidoc @@ -119,7 +119,7 @@ GET test/_search "function_score": { "script_score": { "script": { - "id": "my_script", <2> + "stored": "my_script", <2> "lang": "python", "params": { "factor": 2 diff --git a/docs/plugins/management.asciidoc b/docs/plugins/management.asciidoc index b48d29da3ab..243aaf47e33 100644 --- a/docs/plugins/management.asciidoc +++ b/docs/plugins/management.asciidoc @@ -1,19 +1,16 @@ [[management]] -== Management and Site Plugins +== Management Plugins -Management and site plugins offer UIs for managing and interacting with -Elasticsearch. +Management plugins offer UIs for managing and interacting with Elasticsearch. [float] === Core management plugins The core management plugins are: -link:/products/marvel[Marvel]:: +link:/products/x-pack/monitoring[X-Pack]:: -Marvel is a management and monitoring product for Elasticsearch. Marvel +X-Pack contains the management and monitoring features for Elasticsearch. It aggregates cluster wide statistics and events and offers a single interface to -view and analyze them. 
Marvel is free for development use but requires a -license to run in production. - - +view and analyze them. You can get a link:/subscriptions[free license] for basic +monitoring or a higher level license for more advanced needs. diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index a98c9a525d8..46789cf3f6e 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -217,7 +217,8 @@ The following settings are supported: to split the chunk into several parts, each of `buffer_size` length, and to upload each part in its own request. Note that setting a buffer size lower than `5mb` is not allowed since it will prevents the use of the - Multipart API and may result in upload errors. Defaults to `100mb`. + Multipart API and may result in upload errors. Defaults to the minimum + between `100mb` and `5%` of the heap size. `max_retries`:: diff --git a/docs/plugins/security.asciidoc b/docs/plugins/security.asciidoc index 95ba68a6f05..d113c12bfc2 100644 --- a/docs/plugins/security.asciidoc +++ b/docs/plugins/security.asciidoc @@ -8,12 +8,12 @@ Security plugins add a security layer to Elasticsearch. The core security plugins are: -link:/products/shield[Shield]:: +link:/products/x-pack/security[X-Pack]:: -Shield is the Elastic product that makes it easy for anyone to add -enterprise-grade security to their ELK stack. Designed to address the growing security -needs of thousands of enterprises using ELK today, Shield provides peace of -mind when it comes to protecting your data. +X-Pack is the Elastic product that makes it easy for anyone to add +enterprise-grade security to their Elastic Stack. Designed to address the +growing security needs of thousands of enterprises using the Elastic Stack +today, X-Pack provides peace of mind when it comes to protecting your data. [float] === Community contributed security plugins @@ -25,4 +25,3 @@ The following plugins have been contributed by our community: * https://github.com/sscarduzio/elasticsearch-readonlyrest-plugin[Readonly REST]: High performance access control for Elasticsearch native REST API (by Simone Scarduzio) - diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc index b2a193dfa8c..b4aa02a7af7 100644 --- a/docs/reference/cat/plugins.asciidoc +++ b/docs/reference/cat/plugins.asciidoc @@ -19,6 +19,7 @@ U7321H6 analysis-kuromoji {version} The Japanese (kuromoji) Analysis plugi U7321H6 analysis-phonetic {version} The Phonetic Analysis plugin integrates phonetic token filter analysis with elasticsearch. U7321H6 analysis-smartcn {version} Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into elasticsearch. U7321H6 analysis-stempel {version} The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into elasticsearch. +U7321H6 analysis-ukrainian {version} The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into elasticsearch. U7321H6 discovery-azure-classic {version} The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism U7321H6 discovery-ec2 {version} The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism. U7321H6 discovery-file {version} Discovery file plugin enables unicast discovery from hosts stored in a file. 
diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index a130e66a191..dd5c1de1485 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -26,7 +26,7 @@ The result of the above delete operation is: "_type" : "tweet", "_id" : "1", "_version" : 2, - "result: deleted" + "result": "deleted" } -------------------------------------------------- diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 7d1d26d44c5..27acf29ba9b 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -421,6 +421,42 @@ version. To enable queries sent to older versions of Elasticsearch the `query` parameter is sent directly to the remote host without validation or modification. +Reindexing from a remote server uses an on-heap buffer that defaults to a +maximum size of 200mb. If the remote index includes very large documents you'll +need to use a smaller batch size. The example below sets the batch size to `10`, +which is very, very small. + +[source,js] +-------------------------------------------------- +POST _reindex +{ + "source": { + "remote": { + "host": "http://otherhost:9200", + "username": "user", + "password": "pass" + }, + "index": "source", + "size": 10, + "query": { + "match": { + "test": "data" + } + } + }, + "dest": { + "index": "dest" + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:host] +// TEST[s/^/PUT source\n/] +// TEST[s/otherhost:9200",/\${host}"/] +// TEST[s/"username": "user",//] +// TEST[s/"password": "pass"//] + + [float] === URL Parameters diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index d6b1aa12daf..5e7830ce90c 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -198,7 +198,10 @@ Now that we have our node (and cluster) up and running, the next step is to unde Let's start with a basic health check, which we can use to see how our cluster is doing. We'll be using curl to do this but you can use any tool that allows you to make HTTP/REST calls. Let's assume that we are still on the same node where we started Elasticsearch on and open another command shell window. -To check the cluster health, we will be using the <>. Remember previously that our node HTTP endpoint is available at port `9200`: +To check the cluster health, we will be using the <>. You can +run the command below in https://www.elastic.co/guide/en/kibana/{branch}/console-kibana.html[Kibana's Console] +by clicking "VIEW IN CONSOLE" or with `curl` by clicking the "COPY AS CURL" +link below and pasting it into a terminal. [source,js] -------------------------------------------------- diff --git a/docs/reference/how-to.asciidoc b/docs/reference/how-to.asciidoc index f41c3a3bb9c..d709e17bb4e 100644 --- a/docs/reference/how-to.asciidoc +++ b/docs/reference/how-to.asciidoc @@ -17,6 +17,8 @@ made.
include::how-to/general.asciidoc[] +include::how-to/recipes.asciidoc[] + include::how-to/indexing-speed.asciidoc[] include::how-to/search-speed.asciidoc[] diff --git a/docs/reference/how-to/recipes.asciidoc b/docs/reference/how-to/recipes.asciidoc new file mode 100644 index 00000000000..0bb158f88e8 --- /dev/null +++ b/docs/reference/how-to/recipes.asciidoc @@ -0,0 +1,304 @@ +[[recipes]] +== Recipes + +[float] +[[mixing-exact-search-with-stemming]] +=== Mixing exact search with stemming + +When building a search application, stemming is often a must as it is desirable +for a query on `skiing` to match documents that contain `ski` or `skis`. But +what if a user wants to search for `skiing` specifically? The typical way to do +this would be to use a <> in order to have the same +content indexed in two different ways: + +[source,js] +-------------------------------------------------- +PUT index +{ + "settings": { + "analysis": { + "analyzer": { + "english_exact": { + "tokenizer": "standard", + "filter": [ + "lowercase" + ] + } + } + } + }, + "mappings": { + "type": { + "properties": { + "body": { + "type": "text", + "analyzer": "english", + "fields": { + "exact": { + "type": "text", + "analyzer": "english_exact" + } + } + } + } + } + } +} + +PUT index/type/1 +{ + "body": "Ski resort" +} + +PUT index/type/2 +{ + "body": "A pair of skis" +} + +POST index/_refresh +-------------------------------------------------- +// CONSOLE + +With such a setup, searching for `ski` on `body` would return both documents: + +[source,js] +-------------------------------------------------- +GET index/_search +{ + "query": { + "simple_query_string": { + "fields": [ "body" ], + "query": "ski" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + "took": 2, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "failed": 0 + }, + "hits": { + "total": 2, + "max_score": 0.25811607, + "hits": [ + { + "_index": "index", + "_type": "type", + "_id": "2", + "_score": 0.25811607, + "_source": { + "body": "A pair of skis" + } + }, + { + "_index": "index", + "_type": "type", + "_id": "1", + "_score": 0.25811607, + "_source": { + "body": "Ski resort" + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 2,/"took": "$body.took",/] + +On the other hand, searching for `ski` on `body.exact` would only return +document `1` since the analysis chain of `body.exact` does not perform +stemming. + +[source,js] +-------------------------------------------------- +GET index/_search +{ + "query": { + "simple_query_string": { + "fields": [ "body.exact" ], + "query": "ski" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + "took": 1, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "failed": 0 + }, + "hits": { + "total": 1, + "max_score": 0.25811607, + "hits": [ + { + "_index": "index", + "_type": "type", + "_id": "1", + "_score": 0.25811607, + "_source": { + "body": "Ski resort" + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 1,/"took": "$body.took",/] + +This is not something that is easy to expose to end users, as we would need to +have a way to figure out whether they are looking for an exact match or not and +redirect to the appropriate field accordingly. 
Also, what should we do if only parts of +the query need to be matched exactly while other parts should still take +stemming into account? + +Fortunately, the `query_string` and `simple_query_string` queries have a feature +that allows you to solve exactly this problem: `quote_field_suffix`. It allows you to +tell Elasticsearch that words that appear in between quotes should be redirected +to a different field, see below: + +[source,js] +-------------------------------------------------- +GET index/_search +{ + "query": { + "simple_query_string": { + "fields": [ "body" ], + "quote_field_suffix": ".exact", + "query": "\"ski\"" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + "took": 2, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 5, + "failed": 0 + }, + "hits": { + "total": 1, + "max_score": 0.25811607, + "hits": [ + { + "_index": "index", + "_type": "type", + "_id": "1", + "_score": 0.25811607, + "_source": { + "body": "Ski resort" + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 2,/"took": "$body.took",/] + +In that case, since `ski` was in-between quotes, it was searched on the +`body.exact` field due to the `quote_field_suffix` parameter, so only document +`1` matched. This allows users to mix exact search with stemmed search as they +like. + +[float] +[[consistent-scoring]] +=== Getting consistent scoring + +The fact that Elasticsearch operates with shards and replicas adds challenges +when it comes to having good scoring. + +[float] +==== Scores are not reproducible + +Say the same user runs the same request twice in a row and documents do not come +back in the same order both times: this is a pretty bad experience, isn't it? +Unfortunately this is something that can happen if you have replicas +(`index.number_of_replicas` is greater than 0). The reason is that Elasticsearch +selects the shards that the query should go to in a round-robin fashion, so it +is quite likely if you run the same query twice in a row that it will go to +different copies of the same shard. + +Now why is this a problem? Index statistics are an important part of the score. +And these index statistics may be different across copies of the same shard +due to deleted documents. As you may know, when documents are deleted or updated, +the old document is not immediately removed from the index, it is just marked +as deleted and it will only be removed from disk the next time that the +segment this old document belongs to is merged. However for practical reasons, +those deleted documents are taken into account for index statistics. So imagine +that the primary shard just finished a large merge that removed lots of deleted +documents, then it might have index statistics that are sufficiently different +from the replica (which still has plenty of deleted documents) so that scores +are different too. + +The recommended way to work around this issue is to use a string that identifies +the user that is logged in (a user id or session id for instance) as a +<>. This ensures that all queries of a +given user are always going to hit the same shards, so scores remain more +consistent across queries. +
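For example, assuming the application is tracking some per-user string (the `user_1` value below is a hypothetical session identifier), such a request might look like the following sketch:

[source,js]
--------------------------------------------------
GET index/_search?preference=user_1
{
  "query": {
    "match": {
      "body": "skiing"
    }
  }
}
--------------------------------------------------

Every request sent with the same `preference` value is routed to the same shard copies, which is what keeps scores stable from one request to the next.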
+This workaround has another benefit: when two documents have the same score, +they will be sorted by their internal Lucene doc id (which is unrelated to the +`_id` or `_uid`) by default. However, these doc ids could be different across +copies of the same shard. So by always hitting the same shard, we would get +more consistent ordering of documents that have the same scores. + +[float] +==== Relevancy looks wrong + +If you notice that two documents with the same content get different scores or +that an exact match is not ranked first, then the issue might be related to +sharding. By default, Elasticsearch makes each shard responsible for producing +its own scores. However, since index statistics are an important contributor to +the scores, this only works well if shards have similar index statistics. The +assumption is that since documents are routed evenly to shards by default, then +index statistics should be very similar and scoring would work as expected. +However in the event that you either + - use routing at index time, + - query multiple _indices_, + - or have too little data in your index +then chances are good that all shards that are involved in the search +request do not have similar index statistics and relevancy could be bad. + +If you have a small dataset, the easiest way to work around this issue is to +index everything into an index that has a single shard +(`index.number_of_shards: 1`). Then index statistics will be the same for all +documents and scores will be consistent. + +Otherwise the recommended way to work around this issue is to use the +<> search type. This will make +Elasticsearch perform an initial round trip to all involved shards, asking +them for their index statistics relative to the query, then the coordinating +node will merge those statistics and send the merged statistics alongside the +request when asking shards to perform the `query` phase, so that shards can +use these global statistics rather than their own statistics in order to do the +scoring. + +In most cases, this additional round trip should be very cheap. However in the +event that your query contains a very large number of fields/terms or fuzzy +queries, beware that gathering statistics alone might not be cheap since all +terms have to be looked up in the terms dictionaries in order to look up +statistics. +
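As a sketch, opting into global statistics is only a matter of the `search_type` URL parameter; reusing the `index` and query from the examples earlier in this section:

[source,js]
--------------------------------------------------
GET index/_search?search_type=dfs_query_then_fetch
{
  "query": {
    "simple_query_string": {
      "fields": [ "body" ],
      "query": "ski"
    }
  }
}
--------------------------------------------------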
diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index 1833b45a9ba..7930ed573b4 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -174,9 +174,41 @@ implementation used for these two methods, while not changing the `default`, it is possible to configure a similarity with the name `base`. This similarity will then be used for the two methods. -You can change the default similarity for all fields by putting the following setting into `elasticsearch.yml`: +You can change the default similarity for all fields in an index when +it is <>: [source,js] -------------------------------------------------- -index.similarity.default.type: classic +PUT /my_index +{ + "settings": { + "index": { + "similarity": { + "default": { + "type": "classic" + } + } + } + } +} +-------------------------------------------------- + +If you want to change the default similarity after creating the index +you must <> your index, send the following +request and <> it again afterwards: + +[source,js] +-------------------------------------------------- +PUT /my_index/_settings +{ + "settings": { + "index": { + "similarity": { + "default": { + "type": "classic" + } + } + } + } +} --------------------------------------------------
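Putting the close, update and reopen steps together, the whole sequence might look like this sketch (same `my_index` and request body as above):

[source,js]
--------------------------------------------------
POST /my_index/_close

PUT /my_index/_settings
{
  "settings": {
    "index": {
      "similarity": {
        "default": {
          "type": "classic"
        }
      }
    }
  }
}

POST /my_index/_open
--------------------------------------------------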
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 26181a0fd80..317500b474f 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -9,7 +9,7 @@ release-state can be: released | prerelease | unreleased ////////// :release-state: unreleased -:lucene_version: 6.2.0 +:lucene_version: 6.3.0 :branch: master :jdk: 1.8.0_73 :defguide: https://www.elastic.co/guide/en/elasticsearch/guide/master diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index 04e79ca5a08..d6875909216 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -95,7 +95,8 @@ over. For instance: [source,js] -------------------------------------------------- -PUT / <1> +# PUT / with URI encoding: +PUT /%3Clogs-%7Bnow%2Fd%7D-1%3E <1> { "aliases": { "logs_write": {} @@ -117,7 +118,7 @@ POST /logs_write/_rollover <2> } -------------------------------------------------- // CONSOLE -// TEST[s/\{now\//{2016.10.31||%2f/] +// TEST[s/now/2016.10.31||/] <1> Creates an index named with today's date (e.g.) `logs-2016.10.31-1` <2> Rolls over to a new index with today's date, e.g. `logs-2016.10.31-000002` if run immediately, or `logs-2016.11.01-000002` if run after 24 hours diff --git a/docs/reference/mapping/params/format.asciidoc b/docs/reference/mapping/params/format.asciidoc index 905a7ffe90a..3619017141f 100644 --- a/docs/reference/mapping/params/format.asciidoc +++ b/docs/reference/mapping/params/format.asciidoc @@ -147,17 +147,18 @@ The following tables lists all the defaults ISO formats supported: `date_hour` or `strict_date_hour`:: - A formatter that combines a full date and two digit hour of day. + A formatter that combines a full date and two digit hour of day: + `yyyy-MM-dd'T'HH`. `date_hour_minute` or `strict_date_hour_minute`:: A formatter that combines a full date, two digit hour of day, and two - digit minute of hour. + digit minute of hour: `yyyy-MM-dd'T'HH:mm`. `date_hour_minute_second` or `strict_date_hour_minute_second`:: A formatter that combines a full date, two digit hour of day, two digit - minute of hour, and two digit second of minute. + minute of hour, and two digit second of minute: `yyyy-MM-dd'T'HH:mm:ss`. `date_hour_minute_second_fraction` or `strict_date_hour_minute_second_fraction`:: @@ -183,16 +184,17 @@ The following tables lists all the defaults ISO formats supported: `hour` or `strict_hour`:: - A formatter for a two digit hour of day. + A formatter for a two digit hour of day: `HH`. `hour_minute` or `strict_hour_minute`:: - A formatter for a two digit hour of day and two digit minute of hour. + A formatter for a two digit hour of day and two digit minute of hour: + `HH:mm`. `hour_minute_second` or `strict_hour_minute_second`:: A formatter for a two digit hour of day, two digit minute of hour, and two - digit second of minute. + digit second of minute: `HH:mm:ss`. `hour_minute_second_fraction` or `strict_hour_minute_second_fraction`:: @@ -258,27 +260,28 @@ The following tables lists all the defaults ISO formats supported: `weekyear` or `strict_weekyear`:: - A formatter for a four digit weekyear. + A formatter for a four digit weekyear: `xxxx`. `weekyear_week` or `strict_weekyear_week`:: - A formatter for a four digit weekyear and two digit week of weekyear. + A formatter for a four digit weekyear and two digit week of weekyear: + `xxxx-'W'ww`. `weekyear_week_day` or `strict_weekyear_week_day`:: A formatter for a four digit weekyear, two digit week of weekyear, and one - digit day of week. + digit day of week: `xxxx-'W'ww-e`. `year` or `strict_year`:: - A formatter for a four digit year. + A formatter for a four digit year: `yyyy`. `year_month` or `strict_year_month`:: - A formatter for a four digit year and two digit month of year. + A formatter for a four digit year and two digit month of year: `yyyy-MM`. `year_month_day` or `strict_year_month_day`:: A formatter for a four digit year, two digit month of year, and two digit - day of month. + day of month: `yyyy-MM-dd`. diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index fa260bbeff6..7c09ef46e55 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -6,7 +6,7 @@ codes, zip codes or tags. They are typically used for filtering (_Find me all blog posts where ++status++ is ++published++_), for sorting, and for aggregations. Keyword -fields are ony searchable by their exact value. +fields are only searchable by their exact value. If you need to index full text content such as email bodies or product descriptions, it is likely that you should rather use a <> field. diff --git a/docs/reference/modules/scripting/painless-syntax.asciidoc b/docs/reference/modules/scripting/painless-syntax.asciidoc index 1191facc369..8f280de9946 100644 --- a/docs/reference/modules/scripting/painless-syntax.asciidoc +++ b/docs/reference/modules/scripting/painless-syntax.asciidoc @@ -28,6 +28,23 @@ String constants can be declared with single quotes, to avoid escaping horrors w def mystring = 'foo'; --------------------------------------------------------- +[float] +[[painless-arrays]] +==== Arrays + +Arrays can be subscripted starting from `0` for traditional array access or with +negative numbers to start from the back of the array. So the following +returns `2`. + +[source,painless] +--------------------------------------------------------- +int[] x = new int[5]; +x[0]++; +x[-5]++; +return x[0]; +--------------------------------------------------------- + + [float] [[painless-lists]] ==== List @@ -39,11 +56,13 @@ Lists can be created explicitly (e.g. `new ArrayList()`) or initialized similar def list = [1,2,3]; --------------------------------------------------------- -Lists can also be accessed similar to arrays: they support subscript and `.length`: +Lists can also be accessed similar to arrays.
They support `.length` and +subscripts, including negative subscripts to read from the back of the list: [source,painless] --------------------------------------------------------- def list = [1,2,3]; +list[-1] = 5; return list[0] --------------------------------------------------------- diff --git a/docs/reference/modules/scripting/using.asciidoc b/docs/reference/modules/scripting/using.asciidoc index c3af5861879..b09a54e5c46 100644 --- a/docs/reference/modules/scripting/using.asciidoc +++ b/docs/reference/modules/scripting/using.asciidoc @@ -8,12 +8,12 @@ the same pattern: ------------------------------------- "script": { "lang": "...", <1> - "inline" | "id" | "file": "...", <2> + "inline" | "stored" | "file": "...", <2> "params": { ... } <3> } ------------------------------------- <1> The language the script is written in, which defaults to `painless`. -<2> The script itself which may be specified as `inline`, `id`, or `file`. +<2> The script itself which may be specified as `inline`, `stored`, or `file`. <3> Any named parameters that should be passed into the script. For example, the following script is used in a search request to return a @@ -211,7 +211,7 @@ GET _scripts/groovy/calculate-score // CONSOLE // TEST[continued] -Stored scripts can be used by specifying the `lang` and `id` parameters as follows: +Stored scripts can be used by specifying the `lang` and `stored` parameters as follows: [source,js] -------------------------------------------------- @@ -221,7 +221,7 @@ GET _search "script": { "script": { "lang": "groovy", - "id": "calculate-score", + "stored": "calculate-score", "params": { "my_modifier": 2 } diff --git a/docs/reference/query-dsl/fuzzy-query.asciidoc b/docs/reference/query-dsl/fuzzy-query.asciidoc index f320e81b579..4df30dec2f1 100644 --- a/docs/reference/query-dsl/fuzzy-query.asciidoc +++ b/docs/reference/query-dsl/fuzzy-query.asciidoc @@ -24,6 +24,7 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:fuzzy query is deprecated. Instead use the [match] query with fuzziness parameter] Or with more advanced settings: @@ -45,6 +46,7 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:fuzzy query is deprecated. Instead use the [match] query with fuzziness parameter] [float] ===== Parameters diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc index 60477d6e28a..28d8fcffbe2 100644 --- a/docs/reference/query-dsl/query-string-query.asciidoc +++ b/docs/reference/query-dsl/query-string-query.asciidoc @@ -40,10 +40,6 @@ with default operator of `AND`, the same query is translated to |`allow_leading_wildcard` |When set, `*` or `?` are allowed as the first character. Defaults to `true`. -|`lowercase_expanded_terms` |Whether terms of wildcard, prefix, fuzzy, -and range queries are to be automatically lower-cased or not (since they -are not analyzed). Default it `true`. - |`enable_position_increments` |Set to `true` to enable position increments in result queries. Defaults to `true`. @@ -61,12 +57,12 @@ phrase matches are required. Default value is `0`. |`boost` |Sets the boost value of the query. Defaults to `1.0`. +|`auto_generate_phrase_queries` |Defaults to `false`. + |`analyze_wildcard` |By default, wildcards terms in a query string are not analyzed. By setting this value to `true`, a best effort will be made to analyze those as well. -|`auto_generate_phrase_queries` |Defaults to `false`.
- |`max_determinized_states` |Limit on how many automaton states regexp queries are allowed to create. This protects against too-difficult (e.g. exponentially hard) regexps. Defaults to 10000. @@ -80,11 +76,18 @@ both>>. |`lenient` |If set to `true` will cause format based failures (like providing text to a numeric field) to be ignored. -|`locale` | Locale that should be used for string conversions. -Defaults to `ROOT`. - |`time_zone` | Time Zone to be applied to any range query related to dates. See also http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html[JODA timezone]. + +|`quote_field_suffix` | A suffix to append to fields for quoted parts of +the query string. This allows you to use a field that has a different analysis chain +for exact matching. See <> for a +comprehensive example. + +|`split_on_whitespace` |Whether query text should be split on whitespace prior to analysis. + Instead the query parser would parse around only real 'operators'. + Defaults to `false`. + |======================================================================= When a multi term query is being generated, one can control how it gets diff --git a/docs/reference/query-dsl/query-string-syntax.asciidoc b/docs/reference/query-dsl/query-string-syntax.asciidoc index 9e847102469..9c900959196 100644 --- a/docs/reference/query-dsl/query-string-syntax.asciidoc +++ b/docs/reference/query-dsl/query-string-syntax.asciidoc @@ -61,12 +61,15 @@ they match. Leading wildcards can be disabled by setting `allow_leading_wildcard` to `false`. ======= -Wildcarded terms are not analyzed by default -- they are lowercased -(`lowercase_expanded_terms` defaults to `true`) but no further analysis -is done, mainly because it is impossible to accurately analyze a word that -is missing some of its letters. However, by setting `analyze_wildcard` to -`true`, an attempt will be made to analyze wildcarded words before searching -the term list for matching terms. +Only parts of the analysis chain that operate at the character level are +applied. So for instance, if the analyzer performs both lowercasing and +stemming, only the lowercasing will be applied: it would be wrong to perform +stemming on a word that is missing some of its letters. + +By setting `analyze_wildcard` to true, queries that end with a `*` will be +analyzed and a boolean query will be built out of the different tokens, by +ensuring exact matches on the first N-1 tokens, and prefix match on the last +token. ===== Regular expressions @@ -282,8 +285,8 @@ A space may also be a reserved character. For instance, if you have a synonym list which converts `"wi fi"` to `"wifi"`, a `query_string` search for `"wi fi"` would fail. The query string parser would interpret your query as a search for `"wi OR fi"`, while the token stored in your -index is actually `"wifi"`. Escaping the space will protect it from -being touched by the query string parser: `"wi\ fi"`. +index is actually `"wifi"`. The option `split_on_whitespace=false` will protect it from +being touched by the query string parser and will let the analysis run on the +entire input (`"wi fi"`), as sketched below. **** ===== Empty Query
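To make that concrete, here is a minimal sketch of the synonym-friendly variant; the `body` field is a hypothetical field whose analyzer applies the `"wi fi" => "wifi"` synonym:

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "query_string": {
      "default_field": "body",
      "query": "wi fi",
      "split_on_whitespace": false
    }
  }
}
--------------------------------------------------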
diff --git a/docs/reference/query-dsl/simple-query-string-query.asciidoc b/docs/reference/query-dsl/simple-query-string-query.asciidoc index 796f2517fea..c6f70c31416 100644 --- a/docs/reference/query-dsl/simple-query-string-query.asciidoc +++ b/docs/reference/query-dsl/simple-query-string-query.asciidoc @@ -44,18 +44,11 @@ creating composite queries. |`flags` |Flags specifying which features of the `simple_query_string` to enable. Defaults to `ALL`. -|`lowercase_expanded_terms` | Whether terms of prefix and fuzzy queries should -be automatically lower-cased or not (since they are not analyzed). Defaults to -`true`. - |`analyze_wildcard` | Whether terms of prefix queries should be automatically analyzed or not. If `true` a best effort will be made to analyze the prefix. However, some analyzers will not be able to provide meaningful results based just on the prefix of a term. Defaults to `false`. -|`locale` | Locale that should be used for string conversions. -Defaults to `ROOT`. - |`lenient` | If set to `true` will cause format based failures (like providing text to a numeric field) to be ignored. @@ -63,6 +56,11 @@ Defaults to `ROOT`. document to be returned. See the <> documentation for the full list of options. + +|`quote_field_suffix` | A suffix to append to fields for quoted parts of +the query string. This allows you to use a field that has a different analysis chain +for exact matching. See <> for a +comprehensive example. |======================================================================= [float] diff --git a/docs/reference/query-dsl/template-query.asciidoc b/docs/reference/query-dsl/template-query.asciidoc index b4b00e5babd..2d3b5724d49 100644 --- a/docs/reference/query-dsl/template-query.asciidoc +++ b/docs/reference/query-dsl/template-query.asciidoc @@ -108,7 +108,7 @@ GET /_search { "query": { "template": { - "id": "my_template", <1> + "stored": "my_template", <1> "params" : { "query_string" : "all about search" } diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc index 859455e89b7..d4117e4e96e 100644 --- a/docs/reference/search/count.asciidoc +++ b/docs/reference/search/count.asciidoc @@ -74,9 +74,6 @@ query. |`lenient` |If set to true will cause format based failures (like providing text to a numeric field) to be ignored. Defaults to false. -|`lowercase_expanded_terms` |Should terms be automatically lowercased or -not. Defaults to `true`. - |`analyze_wildcard` |Should wildcard and prefix queries be analyzed or not. Defaults to `false`. diff --git a/docs/reference/search/explain.asciidoc b/docs/reference/search/explain.asciidoc index 1b6560abab4..1291af702f5 100644 --- a/docs/reference/search/explain.asciidoc +++ b/docs/reference/search/explain.asciidoc @@ -136,10 +136,6 @@ This will yield the same result as the previous request. Should wildcard and prefix queries be analyzed or not. Defaults to false. -`lowercase_expanded_terms`:: - Should terms be automatically lowercased - or not. Defaults to true. - `lenient`:: If set to true will cause format based failures (like providing text to a numeric field) to be ignored. Defaults to false. diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index bee8e158175..82a27881720 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -40,6 +40,7 @@ should keep the ``search context'' alive (see <>), eg `?s -------------------------------------------------- POST /twitter/tweet/_search?scroll=1m { + "size": 100, "query": { "match" : { "title" : "elasticsearch" @@ -72,8 +73,10 @@ POST <1> /_search/scroll <2> for another `1m`. <4> The `scroll_id` parameter -Each call to the `scroll` API returns the next batch of results until there -are no more results left to return, ie the `hits` array is empty. +The `size` parameter allows you to configure the maximum number of hits to be +returned with each batch of results. Each call to the `scroll` API returns the +next batch of results until there are no more results left to return, ie the +`hits` array is empty.
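For reference, the follow-up calls that consume those batches keep the same shape regardless of the `size` used in the initial request; a sketch, where the `scroll_id` value is a placeholder for whatever the previous response returned:

[source,js]
--------------------------------------------------
POST /_search/scroll
{
    "scroll" : "1m",
    "scroll_id" : "<scroll_id from the previous response>"
}
--------------------------------------------------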
IMPORTANT: The initial search request and each subsequent scroll request returns a new `_scroll_id` -- only the most recent `_scroll_id` should be diff --git a/docs/reference/search/uri-request.asciidoc b/docs/reference/search/uri-request.asciidoc index 95ce6a8ff6a..6670b9f31d5 100644 --- a/docs/reference/search/uri-request.asciidoc +++ b/docs/reference/search/uri-request.asciidoc @@ -64,9 +64,6 @@ query. |`analyzer` |The analyzer name to be used when analyzing the query string. -|`lowercase_expanded_terms` |Should terms be automatically lowercased or -not. Defaults to `true`. - |`analyze_wildcard` |Should wildcard and prefix queries be analyzed or not. Defaults to `false`. diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index 5fb4ad9b7ce..5b015f4e578 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -52,9 +52,6 @@ query. |`lenient` |If set to true will cause format based failures (like providing text to a numeric field) to be ignored. Defaults to false. -|`lowercase_expanded_terms` |Should terms be automatically lowercased or -not. Defaults to `true`. - |`analyze_wildcard` |Should wildcard and prefix queries be analyzed or not. Defaults to `false`. |======================================================================= diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index 8c1bab474c8..9c2276bc213 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -23,14 +23,19 @@ documented individually. [float] === Development vs. production mode -By default, Elasticsearch binds and publishes to `localhost`. This is +By default, Elasticsearch binds to `localhost` for <> +and <> communication. This is fine for downloading and playing with Elasticsearch, and everyday -development but it's useless for production systems. For a production -installation to be reachable, it must either bind or publish to an -external interface. Thus, we consider Elasticsearch to be in development -mode if it does not bind nor publish to an external interface (the -default), and is otherwise in production mode if it does bind or publish -to an external interface. +development but it's useless for production systems. To form a cluster, +Elasticsearch instances must be reachable via transport communication so +they must bind transport to an external interface. Thus, we consider an +Elasticsearch instance to be in development mode if it does not bind +transport to an external interface (the default), and is otherwise in +production mode if it does bind transport to an external interface. Note +that HTTP can be configured independently of transport via +<> and <>; +this can be useful for configuring a single instance to be reachable via +HTTP for testing purposes without triggering production mode. === Heap size check diff --git a/docs/reference/setup/important-settings.asciidoc b/docs/reference/setup/important-settings.asciidoc index cd957addef1..80e6c1801e9 100644 --- a/docs/reference/setup/important-settings.asciidoc +++ b/docs/reference/setup/important-settings.asciidoc @@ -12,7 +12,6 @@ configured before going into production.
* <> * <> * <> -* <> [float] [[path-settings]] @@ -188,29 +187,3 @@ be thrown which will prevent the node from starting. [float] [[node.max_local_storage_nodes]] -=== `node.max_local_storage_nodes` - -It is possible to start more than one node on the same server from the same -`$ES_HOME`, just by doing the following: - -[source,sh] --------------------------------------------------- -./bin/elasticsearch -d -./bin/elasticsearch -d --------------------------------------------------- - -This works just fine: the data directory structure is designed to let multiple -nodes coexist. However, a single instance of Elasticsearch is able to use all -of the resources of a single server and it seldom makes sense to run multiple -nodes on the same server in production. - -It is, however, possible to start more than one node on the same server by -mistake and to be completely unaware that this problem exists. To prevent more -than one node from sharing the same data directory, it is advisable to add the -following setting: - -[source,yaml] --------------------------------------------------- -node.max_local_storage_nodes: 1 --------------------------------------------------- - diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc index f2e10176480..e3fc4afe0e1 100644 --- a/docs/reference/setup/install/deb.asciidoc +++ b/docs/reference/setup/install/deb.asciidoc @@ -9,6 +9,10 @@ The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the link:/downloads/past-releases[Past Releases page]. +NOTE: Elasticsearch requires Java 8 or later. Use the +http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] +or an open-source distribution such as http://openjdk.java.net[OpenJDK]. + [[deb-key]] ==== Import the Elasticsearch PGP Key diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc index d6c9352aac0..dda9256c2a8 100644 --- a/docs/reference/setup/install/rpm.asciidoc +++ b/docs/reference/setup/install/rpm.asciidoc @@ -13,6 +13,10 @@ The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the link:/downloads/past-releases[Past Releases page]. +NOTE: Elasticsearch requires Java 8 or later. Use the +http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] +or an open-source distribution such as http://openjdk.java.net[OpenJDK]. + [[rpm-key]] ==== Import the Elasticsearch PGP Key diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index 320626a2021..c9d4c4f81c5 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -10,6 +10,10 @@ link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the link:/downloads/past-releases[Past Releases page]. +NOTE: Elasticsearch requires Java 8 or later. Use the +http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] +or an open-source distribution such as http://openjdk.java.net[OpenJDK]. + [[install-windows]] ==== Download and install the `.zip` package @@ -25,11 +29,11 @@ Download the `.zip` archive for Elasticsearch v{version} from: https://artifacts Unzip it with your favourite unzip tool. 
This will create a folder called +elasticsearch-{version}+, which we will refer to as `%ES_HOME%`. In a terminal -window, `CD` to the `%ES_HOME%` directory, for instance: +window, `cd` to the `%ES_HOME%` directory, for instance: ["source","sh",subs="attributes"] ---------------------------- -CD c:\elasticsearch-{version} +cd c:\elasticsearch-{version} ---------------------------- endif::[] @@ -81,7 +85,7 @@ stop the service, all from the command-line. ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -c:\elasticsearch-{version}{backslash}bin>service +c:\elasticsearch-{version}{backslash}bin>elasticsearch-service Usage: elasticsearch-service.bat install|remove|start|stop|manager [SERVICE_ID] -------------------------------------------------- @@ -109,7 +113,7 @@ information is made available during install: ["source","sh",subs="attributes"] -------------------------------------------------- -c:\elasticsearch-{version}{backslash}bin>service install +c:\elasticsearch-{version}{backslash}bin>elasticsearch-service install Installing service : "elasticsearch-service-x64" Using JAVA_HOME (64-bit): "c:\jvm\jdk1.8" The service 'elasticsearch-service-x64' has been installed. diff --git a/docs/reference/setup/install/zip-targz.asciidoc b/docs/reference/setup/install/zip-targz.asciidoc index fc47214615f..9330af43b9e 100644 --- a/docs/reference/setup/install/zip-targz.asciidoc +++ b/docs/reference/setup/install/zip-targz.asciidoc @@ -10,6 +10,10 @@ link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the link:/downloads/past-releases[Past Releases page]. +NOTE: Elasticsearch requires Java 8 or later. Use the +http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] +or an open-source distribution such as http://openjdk.java.net[OpenJDK]. + [[install-zip]] ==== Download and install the `.zip` package @@ -185,4 +189,4 @@ directory so that you do not delete important data later on. |======================================================================= -include::next-steps.asciidoc[] \ No newline at end of file +include::next-steps.asciidoc[] diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc index 60ea506a502..94aa6d4b42b 100644 --- a/docs/reference/testing/testing-framework.asciidoc +++ b/docs/reference/testing/testing-framework.asciidoc @@ -30,11 +30,10 @@ First, you need to include the testing dependency in your project, along with th <dependency> -   <groupId>org.elasticsearch</groupId> -   <artifactId>elasticsearch</artifactId> +   <groupId>org.elasticsearch.test</groupId> +   <artifactId>framework</artifactId>    <version>${elasticsearch.version}</version>    <scope>test</scope> -   <type>test-jar</type> </dependency> -------------------------------------------------- diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc index 47ca68e00f5..0ded9530e0e 100644 --- a/docs/resiliency/index.asciidoc +++ b/docs/resiliency/index.asciidoc @@ -153,10 +153,10 @@ The new tests are run continuously in our testing farm and are passing. We are a that no failures are found. -== Unreleased +== Completed [float] -=== Port Jepsen tests dealing with loss of acknowledged writes to our testing framework (STATUS: UNRELEASED, V5.0.0) +=== Port Jepsen tests dealing with loss of acknowledged writes to our testing framework (STATUS: DONE, V5.0.0) We have increased our test coverage to include scenarios tested by Jepsen that demonstrate loss of acknowledged writes, as described in the Elasticsearch related blogs.
We make heavy use of randomization to expand on the scenarios that can be tested and to introduce @@ -167,7 +167,7 @@ where the `testAckedIndexing` test was specifically added to check that we don't [float] -=== Loss of documents during network partition (STATUS: UNRELEASED, v5.0.0) +=== Loss of documents during network partition (STATUS: DONE, v5.0.0) If a network partition separates a node from the master, there is some window of time before the node detects it. The length of the window is dependent on the type of the partition. This window is extremely small if a socket is broken. More adversarial partitions, for example, silently dropping requests without breaking the socket can take longer (up to 3x30s using current defaults). @@ -175,7 +175,7 @@ If the node hosts a primary shard at the moment of partition, and ends up being To prevent this situation, the primary needs to wait for the master to acknowledge replica shard failures before acknowledging the write to the client. {GIT}14252[#14252] [float] -=== Safe primary relocations (STATUS: UNRELEASED, v5.0.0) +=== Safe primary relocations (STATUS: DONE, v5.0.0) When primary relocation completes, a cluster state is propagated that deactivates the old primary and marks the new primary as active. As cluster state changes are not applied synchronously on all nodes, there can be a time interval where the relocation target has processed the @@ -189,7 +189,7 @@ on the relocation target, each of the nodes believes the other to be the active chasing the primary being quickly sent back and forth between the nodes, potentially making them both go OOM. {GIT}12573[#12573] [float] -=== Do not allow stale shards to automatically be promoted to primary (STATUS: UNRELEASED, v5.0.0) +=== Do not allow stale shards to automatically be promoted to primary (STATUS: DONE, v5.0.0) In some scenarios, after the loss of all valid copies, a stale replica shard can be automatically assigned as a primary, preferring old data to no data at all ({GIT}14671[#14671]). This can lead to a loss of acknowledged writes if the valid copies are not lost but are rather @@ -199,7 +199,7 @@ for one of the good shard copies to reappear. In case where all good copies are stale shard copy. [float] -=== Make index creation resilient to index closing and full cluster crashes (STATUS: UNRELEASED, v5.0.0) +=== Make index creation resilient to index closing and full cluster crashes (STATUS: DONE, v5.0.0) Recovering an index requires a quorum (with an exception for 2) of shard copies to be available to allocate a primary. This means that a primary cannot be assigned if the cluster dies before enough shards have been allocated ({GIT}9126[#9126]). The same happens if an index @@ -211,7 +211,7 @@ shard will be allocated upon reopening the index. [float] -=== Use two phase commit for Cluster State publishing (STATUS: UNRELEASED, v5.0.0) +=== Use two phase commit for Cluster State publishing (STATUS: DONE, v5.0.0) A master node in Elasticsearch continuously https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html#fault-detection[monitors the cluster nodes] and removes any node from the cluster that doesn't respond to its pings in a timely @@ -225,8 +225,6 @@ a new phase to cluster state publishing where the proposed cluster state is sent but is not yet committed. Only once enough nodes (`discovery.zen.minimum_master_nodes`) actively acknowledge the change, it is committed and commit messages are sent to the nodes. See {GIT}13062[#13062]. 
-== Completed - [float] === Wait on incoming joins before electing local node as master (STATUS: DONE, v2.0.0) diff --git a/modules/lang-expression/licenses/lucene-expressions-6.2.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.2.0.jar.sha1 deleted file mode 100644 index 205aaae6e66..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -99764b20aba5443f8a181f7015a806443c589844 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-6.3.0-snapshot-a66a445.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..aadc6a31524 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +0bf61de45f8ea73a185d48572ea094f6b696a7a8 \ No newline at end of file diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java index 249b0080775..d7ac37f8313 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java @@ -157,7 +157,7 @@ public class SearchTemplateRequest extends ActionRequest out.writeBoolean(simulate); out.writeBoolean(explain); out.writeBoolean(profile); - ScriptType.writeTo(scriptType, out); + scriptType.writeTo(out); out.writeOptionalString(script); boolean hasParams = scriptParams != null; out.writeBoolean(hasParams); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java index 9348085ec78..41242a693dd 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/TemplateQueryBuilderTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; +import org.junit.After; import org.junit.Before; import java.io.IOException; @@ -56,6 +57,14 @@ public class TemplateQueryBuilderTests extends AbstractQueryTestCase> getPlugins() { return Arrays.asList(MustachePlugin.class, CustomScriptPlugin.class); diff --git a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml index 2360dfc37f0..cfa97b8bc9f 100644 --- a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml +++ b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml @@ -44,7 +44,7 @@ warnings: - '[template] query is deprecated, use search template api instead' search: - body: { "query": { "template": { "id": "1", "params": { "my_value": "value1" } } } } + body: { "query": { "template": { "stored": "1", "params": { "my_value": "value1" } } } } - match: { hits.total: 1 } @@ -52,7 +52,7 @@ warnings: - '[template] query is deprecated, use search template api instead' search: - body: { "query": { "template": { "id": "/mustache/1", 
"params": { "my_value": "value1" } } } } + body: { "query": { "template": { "stored": "/mustache/1", "params": { "my_value": "value1" } } } } - match: { hits.total: 1 } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java index cd761d0ad44..6de116da0e9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java @@ -109,6 +109,10 @@ public final class Def { private static final MethodHandle LIST_SET; /** pointer to Iterable.iterator() */ private static final MethodHandle ITERATOR; + /** pointer to {@link Def#mapIndexNormalize}. */ + private static final MethodHandle MAP_INDEX_NORMALIZE; + /** pointer to {@link Def#listIndexNormalize}. */ + private static final MethodHandle LIST_INDEX_NORMALIZE; /** factory for arraylength MethodHandle (intrinsic) from Java 9 */ private static final MethodHandle JAVA9_ARRAY_LENGTH_MH_FACTORY; @@ -121,6 +125,10 @@ public final class Def { LIST_GET = lookup.findVirtual(List.class, "get", MethodType.methodType(Object.class, int.class)); LIST_SET = lookup.findVirtual(List.class, "set", MethodType.methodType(Object.class, int.class, Object.class)); ITERATOR = lookup.findVirtual(Iterable.class, "iterator", MethodType.methodType(Iterator.class)); + MAP_INDEX_NORMALIZE = lookup.findStatic(Def.class, "mapIndexNormalize", + MethodType.methodType(Object.class, Map.class, Object.class)); + LIST_INDEX_NORMALIZE = lookup.findStatic(Def.class, "listIndexNormalize", + MethodType.methodType(int.class, List.class, int.class)); } catch (final ReflectiveOperationException roe) { throw new AssertionError(roe); } @@ -522,6 +530,26 @@ public final class Def { "for class [" + receiverClass.getCanonicalName() + "]."); } + /** + * Returns a method handle to normalize the index into an array. This is what makes lists and arrays stored in {@code def} support + * negative offsets. + * @param receiverClass Class of the array to store the value in + * @return a MethodHandle that accepts the receiver as first argument, the index as second argument, and returns the normalized index + * to use with array loads and array stores + */ + static MethodHandle lookupIndexNormalize(Class receiverClass) { + if (receiverClass.isArray()) { + return ArrayIndexNormalizeHelper.arrayIndexNormalizer(receiverClass); + } else if (Map.class.isAssignableFrom(receiverClass)) { + // noop so that mymap[key] doesn't do funny things with negative keys + return MAP_INDEX_NORMALIZE; + } else if (List.class.isAssignableFrom(receiverClass)) { + return LIST_INDEX_NORMALIZE; + } + throw new IllegalArgumentException("Attempting to address a non-array-like type " + + "[" + receiverClass.getCanonicalName() + "] as an array."); + } + /** * Returns a method handle to do an array store. * @param receiverClass Class of the array to store the value in @@ -814,4 +842,62 @@ public final class Def { return ((Number)value).doubleValue(); } } + + /** + * "Normalizes" the index into a {@code Map} by making no change to the index. + */ + public static Object mapIndexNormalize(final Map value, Object index) { + return index; + } + + /** + * "Normalizes" the idnex into a {@code List} by flipping negative indexes around so they are "from the end" of the list. + */ + public static int listIndexNormalize(final List value, int index) { + return index >= 0 ? 
index : value.size() + index; + } + + /** + * Methods to normalize array indices to support negative indices into arrays stored in {@code def}s. + */ + @SuppressWarnings("unused") // normalizeIndex() methods are actually used, javac just does not know :) + private static final class ArrayIndexNormalizeHelper { + private static final Lookup PRIV_LOOKUP = MethodHandles.lookup(); + + private static final Map<Class<?>,MethodHandle> ARRAY_TYPE_MH_MAPPING = Collections.unmodifiableMap( + Stream.of(boolean[].class, byte[].class, short[].class, int[].class, long[].class, + char[].class, float[].class, double[].class, Object[].class) + .collect(Collectors.toMap(Function.identity(), type -> { + try { + return PRIV_LOOKUP.findStatic(PRIV_LOOKUP.lookupClass(), "normalizeIndex", + MethodType.methodType(int.class, type, int.class)); + } catch (ReflectiveOperationException e) { + throw new AssertionError(e); + } + })) + ); + + private static final MethodHandle OBJECT_ARRAY_MH = ARRAY_TYPE_MH_MAPPING.get(Object[].class); + + static int normalizeIndex(final boolean[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final byte[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final short[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final int[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final long[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final char[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final float[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final double[] array, final int index) { return index >= 0 ? index : index + array.length; } + static int normalizeIndex(final Object[] array, final int index) { return index >= 0 ? index : index + array.length; } + + static MethodHandle arrayIndexNormalizer(Class<?> arrayType) { + if (!arrayType.isArray()) { + throw new IllegalArgumentException("type must be an array"); + } + return (ARRAY_TYPE_MH_MAPPING.containsKey(arrayType)) ? + ARRAY_TYPE_MH_MAPPING.get(arrayType) : + OBJECT_ARRAY_MH.asType(OBJECT_ARRAY_MH.type().changeParameterType(0, arrayType)); + } + + private ArrayIndexNormalizeHelper() {} + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java index 9640629cb87..307316efdf4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java @@ -32,9 +32,10 @@ import java.lang.invoke.WrongMethodTypeException; /** * Painless invokedynamic bootstrap for the call site. * <p>
- * Has 7 flavors (passed as static bootstrap parameters): dynamic method call, + * Has 11 flavors (passed as static bootstrap parameters): dynamic method call, * dynamic field load (getter), and dynamic field store (setter), dynamic array load, - * dynamic array store, iterator, and method reference. + * dynamic array store, iterator, method reference, unary operator, binary operator, + * shift operator, and dynamic array index normalize. * <p>
* When a new type is encountered at the call site, we look up from the appropriate * whitelist, and cache with a guard. If we encounter too many types, we stop caching. @@ -69,6 +70,8 @@ public final class DefBootstrap { public static final int BINARY_OPERATOR = 8; /** static bootstrap parameter indicating a shift operator, e.g. foo >> bar */ public static final int SHIFT_OPERATOR = 9; + /** static bootstrap parameter indicating a request to normalize an index for array-like-access */ + public static final int INDEX_NORMALIZE = 10; // constants for the flags parameter of operators /** @@ -152,6 +155,8 @@ public final class DefBootstrap { return Def.lookupIterator(receiver); case REFERENCE: return Def.lookupReference(lookup, (String) args[0], receiver, name); + case INDEX_NORMALIZE: + return Def.lookupIndexNormalize(receiver); default: throw new AssertionError(); } } @@ -448,6 +453,7 @@ public final class DefBootstrap { case ARRAY_LOAD: case ARRAY_STORE: case ITERATOR: + case INDEX_NORMALIZE: if (args.length > 0) { throw new BootstrapMethodError("Illegal static bootstrap parameters for flavor: " + flavor); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java index 43fd54c51a4..7e56bf49156 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java @@ -30,7 +30,6 @@ import org.objectweb.asm.commons.Method; import java.util.ArrayDeque; import java.util.ArrayList; -import java.util.Arrays; import java.util.BitSet; import java.util.Deque; import java.util.List; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java index c546207b1ee..684f9a59ee2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java @@ -32,6 +32,7 @@ import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; import java.util.BitSet; +import java.util.Collection; import java.util.Iterator; import java.util.Map; import java.util.Objects; @@ -112,6 +113,7 @@ public final class WriterConstants { public static final Method DEF_TO_LONG_EXPLICIT = getAsmMethod(long.class , "DefTolongExplicit" , Object.class); public static final Method DEF_TO_FLOAT_EXPLICIT = getAsmMethod(float.class , "DefTofloatExplicit" , Object.class); public static final Method DEF_TO_DOUBLE_EXPLICIT = getAsmMethod(double.class , "DefTodoubleExplicit", Object.class); + public static final Type DEF_ARRAY_LENGTH_METHOD_TYPE = Type.getMethodType(Type.INT_TYPE, Definition.DEF_TYPE.type); /** invokedynamic bootstrap for lambda expression/method references */ public static final MethodType LAMBDA_BOOTSTRAP_TYPE = @@ -158,6 +160,9 @@ public final class WriterConstants { public static final Type OBJECTS_TYPE = Type.getType(Objects.class); public static final Method EQUALS = getAsmMethod(boolean.class, "equals", Object.class, Object.class); + public static final Type COLLECTION_TYPE = Type.getType(Collection.class); + public static final Method COLLECTION_SIZE = getAsmMethod(int.class, "size"); + private static Method getAsmMethod(final Class<?> rtype, final String name, final Class<?>...
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStoreable.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStoreable.java index 71b8ccd4da1..3cff6bab08e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStoreable.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AStoreable.java @@ -23,8 +23,11 @@ import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.objectweb.asm.Label; +import org.objectweb.asm.Opcodes; import java.util.Objects; +import java.util.function.Consumer; /** * The super class for an expression that can store a value in local memory. */ @@ -100,4 +103,21 @@ abstract class AStoreable extends AExpression { * Called to store a storable to local memory. */ abstract void store(MethodWriter writer, Globals globals); + + /** + * Writes the opcodes to flip a negative array index (meaning slots from the end of the array) into a 0-based one (meaning slots from + * the start of the array). + */ + static void writeIndexFlip(MethodWriter writer, Consumer<MethodWriter> writeGetLength) { + Label noFlip = new Label(); + // Wherever the comments below say 'array', the value could just as well be a list + // The stack after each instruction: array, unnormalized_index + writer.dup(); // array, unnormalized_index, unnormalized_index + writer.ifZCmp(Opcodes.IFGE, noFlip); // array, unnormalized_index + writer.swap(); // negative_index, array + writer.dupX1(); // array, negative_index, array + writeGetLength.accept(writer); // array, negative_index, length + writer.visitInsn(Opcodes.IADD); // array, normalized_index + writer.mark(noFlip); // array, normalized_index + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubBrace.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubBrace.java index 45b3ef88cd1..a6fb3cefbb1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubBrace.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubBrace.java @@ -60,10 +60,8 @@ final class PSubBrace extends AStoreable { @Override void write(MethodWriter writer, Globals globals) { - if (!write) { - setup(writer, globals); - load(writer, globals); - } + setup(writer, globals); + load(writer, globals); } @Override @@ -84,6 +82,7 @@ final class PSubBrace extends AStoreable { @Override void setup(MethodWriter writer, Globals globals) { index.write(writer, globals); + writeIndexFlip(writer, MethodWriter::arrayLength); } @Override
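The stack comments above compress a lot; in plain Java the bytecode that writeIndexFlip emits is equivalent to the small helper below. This is a sketch of the semantics only: 'length' stands for whatever the writeGetLength consumer pushes, i.e. the arraylength instruction for arrays (MethodWriter::arrayLength) and Collection#size() for the list shortcut.

// Plain-Java equivalent of the dup/ifZCmp/swap/dupX1/IADD sequence (illustration).
static int flipIfNegative(int unnormalizedIndex, int length) {
    if (unnormalizedIndex >= 0) {      // ifZCmp(IFGE, noFlip): non-negative indexes skip the flip
        return unnormalizedIndex;
    }
    return unnormalizedIndex + length; // IADD: a negative index counts back from the end
}

The stack shuffling exists only so the receiver stays underneath the normalized index in the order the following load or store expects.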
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefArray.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefArray.java index 2153897a000..2776fffec61 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefArray.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefArray.java @@ -34,7 +34,6 @@ import java.util.Set; /** * Represents an array load/store or shortcut on a def type. (Internal only.) */ final class PSubDefArray extends AStoreable { - private AExpression index; PSubDefArray(Location location, AExpression index) { @@ -59,13 +58,8 @@ final class PSubDefArray extends AStoreable { @Override void write(MethodWriter writer, Globals globals) { - index.write(writer, globals); - - writer.writeDebugInfo(location); - - org.objectweb.asm.Type methodType = - org.objectweb.asm.Type.getMethodType(actual.type, Definition.DEF_TYPE.type, index.actual.type); - writer.invokeDefCall("arrayLoad", methodType, DefBootstrap.ARRAY_LOAD); + setup(writer, globals); + load(writer, globals); } @Override @@ -85,7 +79,12 @@ final class PSubDefArray extends AStoreable { @Override void setup(MethodWriter writer, Globals globals) { - index.write(writer, globals); + // Current stack: def + writer.dup(); // def, def + index.write(writer, globals); // def, def, unnormalized_index + org.objectweb.asm.Type methodType = org.objectweb.asm.Type.getMethodType( + index.actual.type, Definition.DEF_TYPE.type, index.actual.type); + writer.invokeDefCall("normalizeIndex", methodType, DefBootstrap.INDEX_NORMALIZE); // def, normalized_index } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java index c13f8235821..5b8396f72d3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java @@ -28,6 +28,7 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.WriterConstants; import java.util.Objects; import java.util.Set; @@ -87,15 +88,8 @@ final class PSubListShortcut extends AStoreable { @Override void write(MethodWriter writer, Globals globals) { - index.write(writer, globals); - - writer.writeDebugInfo(location); - - getter.write(writer); - - if (!getter.rtn.clazz.equals(getter.handle.type().returnType())) { - writer.checkCast(getter.rtn.type); - } + setup(writer, globals); + load(writer, globals); } @Override @@ -116,6 +110,9 @@ final class PSubListShortcut extends AStoreable { @Override void setup(MethodWriter writer, Globals globals) { index.write(writer, globals); + writeIndexFlip(writer, w -> { + w.invokeInterface(WriterConstants.COLLECTION_TYPE, WriterConstants.COLLECTION_SIZE); + }); } @Override diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java new file mode 100644 index 00000000000..69b40f141e2 --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayLikeObjectTestCase.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import org.elasticsearch.common.Nullable; +import org.hamcrest.Matcher; + +import static java.util.Collections.singletonMap; + +/** + * Superclass for testing array-like objects (arrays and lists). + */ +public abstract class ArrayLikeObjectTestCase extends ScriptTestCase { + /** + * Build the string for declaring the variable holding the array-like-object to test. So {@code int[]} for arrays and {@code List} for + * lists. + */ + protected abstract String declType(String valueType); + /** + * Build the string for calling the constructor for the array-like-object to test. So {@code new int[5]} for arrays and + * {@code [0, 0, 0, 0, 0]} or {@code [null, null, null, null, null]} for lists. + */ + protected abstract String valueCtorCall(String valueType, int size); + /** + * Matcher for the message of the out of bounds exceptions thrown for too negative or too positive offsets. + */ + protected abstract Matcher outOfBoundsExceptionMessageMatcher(int index, int size); + + private void arrayLoadStoreTestCase(boolean declareAsDef, String valueType, Object val, @Nullable Number valPlusOne) { + String declType = declareAsDef ? "def" : declType(valueType); + String valueCtorCall = valueCtorCall(valueType, 5); + String decl = declType + " x = " + valueCtorCall; + assertEquals(5, exec(decl + "; return x.length", true)); + assertEquals(val, exec(decl + "; x[ 0] = params.val; return x[ 0];", singletonMap("val", val), true)); + assertEquals(val, exec(decl + "; x[ 0] = params.val; return x[-5];", singletonMap("val", val), true)); + assertEquals(val, exec(decl + "; x[-5] = params.val; return x[-5];", singletonMap("val", val), true)); + + expectOutOfBounds( 6, decl + "; return x[ 6]", val); + expectOutOfBounds(-1, decl + "; return x[-6]", val); + expectOutOfBounds( 6, decl + "; x[ 6] = params.val; return 0", val); + expectOutOfBounds(-1, decl + "; x[-6] = params.val; return 0", val); + + if (valPlusOne != null) { + assertEquals(val, exec(decl + "; x[0] = params.val; x[ 0] = x[ 0]++; return x[0];", singletonMap("val", val), true)); + assertEquals(val, exec(decl + "; x[0] = params.val; x[ 0] = x[-5]++; return x[0];", singletonMap("val", val), true)); + assertEquals(valPlusOne, exec(decl + "; x[0] = params.val; x[ 0] = ++x[ 0]; return x[0];", singletonMap("val", val), true)); + assertEquals(valPlusOne, exec(decl + "; x[0] = params.val; x[ 0] = ++x[-5]; return x[0];", singletonMap("val", val), true)); + assertEquals(valPlusOne, exec(decl + "; x[0] = params.val; x[ 0]++ ; return x[0];", singletonMap("val", val), true)); + assertEquals(valPlusOne, exec(decl + "; x[0] = params.val; x[-5]++ ; return x[0];", singletonMap("val", val), true)); + assertEquals(valPlusOne, exec(decl + "; x[0] = params.val; x[ 0] += 1 ; return x[0];", singletonMap("val", val), true)); + assertEquals(valPlusOne, exec(decl + "; x[0] = params.val; x[-5] += 1 ; return x[0];", singletonMap("val", val), true)); + + expectOutOfBounds( 6, decl + "; return x[ 6]++", val); + expectOutOfBounds(-1, decl + "; return x[-6]++", val); + expectOutOfBounds( 6, decl + "; return ++x[ 6]", 
val); + expectOutOfBounds(-1, decl + "; return ++x[-6]", val); + expectOutOfBounds( 6, decl + "; x[ 6] += 1; return 0", val); + expectOutOfBounds(-1, decl + "; x[-6] += 1; return 0", val); + } + } + + private void expectOutOfBounds(int index, String script, Object val) { + IndexOutOfBoundsException e = expectScriptThrows(IndexOutOfBoundsException.class, + () -> exec(script, singletonMap("val", val), true)); + try { + assertThat(e.getMessage(), outOfBoundsExceptionMessageMatcher(index, 5)); + } catch (AssertionError ae) { + ae.addSuppressed(e); // Mark the exception we are testing as suppressed so we get its stack trace. If it has one :( + throw ae; + } + } + + public void testInts() { arrayLoadStoreTestCase(false, "int", 5, 6); } + public void testIntsInDef() { arrayLoadStoreTestCase(true, "int", 5, 6); } + public void testLongs() { arrayLoadStoreTestCase(false, "long", 5L, 6L); } + public void testLongsInDef() { arrayLoadStoreTestCase(true, "long", 5L, 6L); } + public void testShorts() { arrayLoadStoreTestCase(false, "short", (short) 5, (short) 6); } + public void testShortsInDef() { arrayLoadStoreTestCase(true, "short", (short) 5, (short) 6); } + public void testBytes() { arrayLoadStoreTestCase(false, "byte", (byte) 5, (byte) 6); } + public void testBytesInDef() { arrayLoadStoreTestCase(true, "byte", (byte) 5, (byte) 6); } + public void testFloats() { arrayLoadStoreTestCase(false, "float", 5.0f, 6.0f); } + public void testFloatsInDef() { arrayLoadStoreTestCase(true, "float", 5.0f, 6.0f); } + public void testDoubles() { arrayLoadStoreTestCase(false, "double", 5.0d, 6.0d); } + public void testDoublesInDef() { arrayLoadStoreTestCase(true, "double", 5.0d, 6.0d); } + public void testStrings() { arrayLoadStoreTestCase(false, "String", "cat", null); } + public void testStringsInDef() { arrayLoadStoreTestCase(true, "String", "cat", null); } + public void testDef() { arrayLoadStoreTestCase(true, "def", 5, null); } +}
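Concretely, for the array subclass below these shared cases compose into scripts like the following (an illustration using the exec helpers visible in the test above; the exact script strings are mine, not copied from the suite): declType("int") yields "int[]" and valueCtorCall("int", 5) yields "new int[5]", so for a five-slot value x[-5] and x[0] name the same slot.

// Illustration: the negative-index semantics the shared cases pin down.
assertEquals(5, exec("int[] x = new int[5]; x[-5] = 5; return x[0];", true)); // write via -5, read via 0
assertEquals(5, exec("def x = new int[5]; x[0] = 5; return x[-5];", true));   // same slot through def dispatch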
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayTests.java index acacc613ab3..fe2ee1683bb 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayTests.java @@ -19,11 +19,29 @@ package org.elasticsearch.painless; +import org.hamcrest.Matcher; + import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodType; -/** Tests for or operator across all types */ -public class ArrayTests extends ScriptTestCase { +import static org.hamcrest.Matchers.equalTo; + +/** Tests for working with arrays. */ +public class ArrayTests extends ArrayLikeObjectTestCase { + @Override + protected String declType(String valueType) { + return valueType + "[]"; + } + + @Override + protected String valueCtorCall(String valueType, int size) { + return "new " + valueType + "[" + size + "]"; + } + + @Override + protected Matcher outOfBoundsExceptionMessageMatcher(int index, int size) { + return equalTo(Integer.toString(index)); + } public void testArrayLengthHelper() throws Throwable { assertArrayLength(2, new int[2]); @@ -45,29 +63,6 @@ public class ArrayTests extends ScriptTestCase { .invokeExact(array)); } - public void testArrayLoadStoreInt() { - assertEquals(5, exec("def x = new int[5]; return x.length")); - assertEquals(5, exec("def x = new int[4]; x[0] = 5; return x[0];")); - } - - public void testArrayLoadStoreString() { - assertEquals(5, exec("def x = new String[5]; return x.length")); - assertEquals("foobar", exec("def x = new String[4]; x[0] = 'foobar'; return x[0];")); - } - - public void testArrayLoadStoreDef() { - assertEquals(5, exec("def x = new def[5]; return x.length")); - assertEquals(5, exec("def x = new def[4]; x[0] = 5; return x[0];")); - } - - public void testArrayCompoundInt() { - assertEquals(6, exec("int[] x = new int[5]; x[0] = 5; x[0]++; return x[0];")); - } - - public void testArrayCompoundDef() { - assertEquals(6, exec("def x = new int[5]; x[0] = 5; x[0]++; return x[0];")); - } - public void testJacksCrazyExpression1() { assertEquals(1, exec("int x; def[] y = new def[1]; x = y[0] = 1; return x;")); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ListTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ListTests.java new file mode 100644 index 00000000000..1ae7ca0bc4f --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ListTests.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import org.hamcrest.Matcher; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +/** Tests for working with lists.
*/ +public class ListTests extends ArrayLikeObjectTestCase { + @Override + protected String declType(String valueType) { + return "List"; + } + + @Override + protected String valueCtorCall(String valueType, int size) { + String[] fill = new String[size]; + Arrays.fill(fill, fillValue(valueType)); + return "[" + String.join(",", fill) + "]"; + } + + private String fillValue(String valueType) { + switch (valueType) { + case "int": return "0"; + case "long": return "0L"; + case "short": return "(short) 0"; + case "byte": return "(byte) 0"; + case "float": return "0.0f"; + case "double": return "0.0"; // Double is implicit for decimal constants + default: return null; + } + } + + @Override + protected Matcher outOfBoundsExceptionMessageMatcher(int index, int size) { + if ("1.8".equals(Runtime.class.getPackage().getSpecificationVersion())) { + if (index > size) { + return equalTo("Index: " + index + ", Size: " + size); + } + Matcher matcher = equalTo(Integer.toString(index)); + // If we set -XX:-OmitStackTraceInFastThrow we wouldn't need this + matcher = anyOf(matcher, nullValue()); + return matcher; + } else { + // This exception is locale dependent so we attempt to reproduce it + List list = new ArrayList<>(); + for (int i = 0; i < size; i++) { + list.add(new Object()); + } + Exception e = expectThrows(IndexOutOfBoundsException.class, () -> list.get(index)); + return equalTo(e.getMessage()); + } + } + +} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/MapTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/MapTests.java new file mode 100644 index 00000000000..034213e74be --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/MapTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import static java.util.Collections.singletonMap; + +/** Tests for working with maps. 
*/ +public class MapTests extends ScriptTestCase { + private void mapAccessesTestCase(String mapType) { + Object val = randomFrom("test", 1, 1.3, new Object()); + String decl = mapType + " x = ['a': 1, 'b': 2, 0: 2, -5: 'slot', 123.1: 12]"; + assertEquals(5, exec(decl + "; return x.size()")); + assertEquals(2, exec(decl + "; return x[0];", true)); + assertEquals(1, exec(decl + "; return x['a'];", true)); + assertEquals(12, exec(decl + "; return x[123.1];", true)); + assertEquals(val, exec(decl + "; x[ 0] = params.val; return x[ 0];", singletonMap("val", val), true)); + assertEquals("slot", exec(decl + "; x[ 0] = params.val; return x[-5];", singletonMap("val", val), true)); + assertEquals(val, exec(decl + "; x[-5] = params.val; return x[-5];", singletonMap("val", val), true)); + } + + public void testMapInDefAccesses() { + mapAccessesTestCase("def"); + } + + public void testMapAccesses() { + mapAccessesTestCase("Map"); + } +}
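Note the contrast with the array and list cases above: map access is deliberately not index-normalized, so a negative key such as -5 is an ordinary map key, not an offset from the end. A hedged illustration of the runtime distinction (the script strings are mine, using the same exec helper as the tests):

// def dispatch picks the behavior by receiver type at runtime (illustration):
assertEquals("slot", exec("def x = [-5: 'slot']; return x[-5]", true));    // map: plain key lookup
assertEquals(4,      exec("def x = [0, 1, 2, 3, 4]; return x[-1]", true)); // list: one slot from the end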
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index 621cb07d3cd..d485f9fa00e 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -292,7 +292,8 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { .field(fieldName, queryBuilder) .endObject().bytes()); BytesRef qbSource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue(); - assertQueryBuilder(qbSource, queryBuilder.rewrite(indexService.newQueryShardContext())); + assertQueryBuilder(qbSource, queryBuilder.rewrite(indexService.newQueryShardContext( + randomInt(20), null, () -> { throw new UnsupportedOperationException(); }))); } @@ -476,7 +477,9 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { private void assertQueryBuilder(BytesRef actual, QueryBuilder expected) throws IOException { XContentParser sourceParser = PercolatorFieldMapper.QUERY_BUILDER_CONTENT_TYPE.xContent() .createParser(actual.bytes, actual.offset, actual.length); - QueryParseContext qsc = indexService.newQueryShardContext().newParseContext(sourceParser); + QueryParseContext qsc = indexService.newQueryShardContext( + randomInt(20), null, () -> { throw new UnsupportedOperationException(); }) + .newParseContext(sourceParser); assertThat(qsc.parseInnerQueryBuilder().get(), equalTo(expected)); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java index 207948c9215..7a45e2b15cb 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex.remote; +import org.apache.http.ContentTooLongException; import org.apache.http.HttpEntity; import org.apache.http.util.EntityUtils; import org.apache.logging.log4j.Logger; @@ -29,6 +30,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.HeapBufferedAsyncResponseConsumer; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.ResponseListener; import org.elasticsearch.client.RestClient; @@ -37,6 +39,8 @@ import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -67,6 +71,10 @@ import static org.elasticsearch.index.reindex.remote.RemoteResponseParsers.MAIN_ import static org.elasticsearch.index.reindex.remote.RemoteResponseParsers.RESPONSE_PARSER; public class RemoteScrollableHitSource extends ScrollableHitSource { + /** + * The maximum size of the remote response to buffer: 200mb, because bulks beyond 40mb tend to be slow anyway and 200mb is simply huge. + */ + private static final ByteSizeValue BUFFER_LIMIT = new ByteSizeValue(200, ByteSizeUnit.MB); private final RestClient client; private final BytesReference query; private final SearchRequest searchRequest; @@ -142,7 +150,8 @@ public class RemoteScrollableHitSource extends ScrollableHitSource { @Override protected void doRun() throws Exception { - client.performRequestAsync(method, uri, params, entity, new ResponseListener() { + HeapBufferedAsyncResponseConsumer consumer = new HeapBufferedAsyncResponseConsumer(BUFFER_LIMIT.bytesAsInt()); + client.performRequestAsync(method, uri, params, entity, consumer, new ResponseListener() { @Override public void onSuccess(org.elasticsearch.client.Response response) { // Restore the thread context to get the precious headers @@ -184,6 +193,9 @@ public class RemoteScrollableHitSource extends ScrollableHitSource { } e = wrapExceptionToPreserveStatus(re.getResponse().getStatusLine().getStatusCode(), re.getResponse().getEntity(), re); + } else if (e instanceof ContentTooLongException) { + e = new IllegalArgumentException( + "Remote responded with a chunk that was too large. Use a smaller batch size.", e); } fail.accept(e); } }
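The interesting piece above is the switch to the performRequestAsync overload that takes an explicit response consumer, which is what enforces the buffer cap. A minimal sketch of that call shape, using only classes the diff itself imports; the wrapper class and method here are hypothetical, not part of the change.

import java.util.Map;
import org.apache.http.HttpEntity;
import org.elasticsearch.client.HeapBufferedAsyncResponseConsumer;
import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

class BufferedScrollSketch {
    /** Issues an async request whose response is buffered on-heap, capped at the given limit. */
    static void fetch(RestClient client, String method, String uri, Map<String, String> params,
                      HttpEntity entity, ResponseListener listener) {
        HeapBufferedAsyncResponseConsumer consumer =
                new HeapBufferedAsyncResponseConsumer(new ByteSizeValue(200, ByteSizeUnit.MB).bytesAsInt());
        // Once the limit is exceeded the consumer fails the request with ContentTooLongException,
        // which the source above rewraps into the "use a smaller batch size" message.
        client.performRequestAsync(method, uri, params, entity, consumer, listener);
    }
}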
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index 3a6a6dc2f68..3cc8c3c5e6f 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex.remote; +import org.apache.http.ContentTooLongException; import org.apache.http.HttpEntity; import org.apache.http.HttpEntityEnclosingRequest; import org.apache.http.HttpHost; @@ -39,10 +40,13 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.HeapBufferedAsyncResponseConsumer; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.index.reindex.ScrollableHitSource.Response; @@ -76,7 +80,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class RemoteScrollableHitSourceTests extends ESTestCase { - private final String FAKE_SCROLL_ID = "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll"; + private static final String FAKE_SCROLL_ID = "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll"; private int retries; private ThreadPool threadPool; private SearchRequest searchRequest; @@ -429,6 +433,39 @@ public class RemoteScrollableHitSourceTests extends ESTestCase { assertEquals(badEntityException, wrapped.getSuppressed()[0]); } + @SuppressWarnings({ "unchecked", "rawtypes" }) + public void testTooLargeResponse() throws Exception { + ContentTooLongException tooLong = new ContentTooLongException("too long!"); + CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); + when(httpClient.execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class), + any(FutureCallback.class))).then(new Answer<Future<HttpResponse>>() { + @Override + public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable { + HeapBufferedAsyncResponseConsumer consumer = (HeapBufferedAsyncResponseConsumer) invocationOnMock.getArguments()[1]; + FutureCallback<HttpResponse> callback = (FutureCallback<HttpResponse>) invocationOnMock.getArguments()[2]; + + assertEquals(new ByteSizeValue(200, ByteSizeUnit.MB).bytesAsInt(), consumer.getBufferLimit()); + callback.failed(tooLong); + return null; + } + }); + RemoteScrollableHitSource source = sourceWithMockedClient(true, httpClient); + + AtomicBoolean called = new AtomicBoolean(); + Consumer<Response> checkResponse = r -> called.set(true); + Throwable e = expectThrows(RuntimeException.class, + () -> source.doStartNextScroll(FAKE_SCROLL_ID, timeValueMillis(0), checkResponse)); + // Unwrap some of the wrapping the test infrastructure adds + while (e.getMessage().equals("failed")) { + e = e.getCause(); + } + // This next exception is what the user sees + assertEquals("Remote responded with a chunk that was too large.
Use a smaller batch size.", e.getMessage()); + // And that exception is reported as being caused by the underlying exception returned by the client + assertSame(tooLong, e.getCause()); + assertFalse(called.get()); + } + private RemoteScrollableHitSource sourceWithMockedRemoteCall(String... paths) throws Exception { return sourceWithMockedRemoteCall(true, paths); } @@ -482,7 +519,11 @@ public class RemoteScrollableHitSourceTests extends ESTestCase { return null; } }); + return sourceWithMockedClient(mockRemoteVersion, httpClient); + } + private RemoteScrollableHitSource sourceWithMockedClient(boolean mockRemoteVersion, CloseableHttpAsyncClient httpClient) + throws Exception { HttpAsyncClientBuilder clientBuilder = mock(HttpAsyncClientBuilder.class); when(clientBuilder.build()).thenReturn(httpClient); diff --git a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java index aea5b31f457..f32bd5dc19b 100644 --- a/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java +++ b/modules/transport-netty3/src/main/java/org/elasticsearch/transport/netty3/Netty3Utils.java @@ -99,7 +99,6 @@ public class Netty3Utils { InternalLoggerFactory.setDefaultFactory(new InternalLoggerFactory() { @Override public InternalLogger newInstance(String name) { - name = name.replace("org.jboss.netty.", "netty3.").replace("org.jboss.netty.", "netty3."); return new Netty3InternalESLogger(Loggers.getLogger(name)); } }); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java index 53cf1b329aa..877d50e1674 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java @@ -41,7 +41,7 @@ public class Netty4Utils { @Override public InternalLogger newInstance(final String name) { - return new Netty4InternalESLogger(name.replace("io.netty.", "netty.")); + return new Netty4InternalESLogger(name); } }); diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.2.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.2.0.jar.sha1 deleted file mode 100644 index 2a734f79a3f..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68de5f298090b92aa9a803eb4f5aed0c9104e685 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..29114cfcf70 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +39e5761c8209a6e4e940a3aec4ba57a6b631ca00 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.2.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.2.0.jar.sha1 deleted file mode 100644 index 749cb8ecde8..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -17ee76df332c0342a172790472b777086487a299 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.3.0-snapshot-a66a445.jar.sha1 
b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..2ec23fb8b2d --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +c4230c40a10cbb4ad54bcbe9e4265ecb598a4c25 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.2.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.2.0.jar.sha1 deleted file mode 100644 index 359173e0084..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d2a6b8679563d9f044eb1cee580282b20d8e149 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..27a5a67a55a --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +ccd0636f0df42146b5c77cac5ec57739c9ff2893 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.2.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.2.0.jar.sha1 deleted file mode 100644 index 66e339bfa2f..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba3fd99d1cf47d31b82817accdb199fc7a8d838d \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..a70cf1ae74f --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +17b3d2f5ffd58756b6d5bdc651eb2ea461885d0a \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.2.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.2.0.jar.sha1 deleted file mode 100644 index 5cfb071f3a3..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09d2a759a765f73e2e7becbc560411469c464cfa \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..466578a5e24 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +d3d540a7225837e25cc0ed02aefb0c7763e0f832 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/build.gradle b/plugins/analysis-ukrainian/build.gradle new file mode 100644 index 00000000000..b3c5473a2ff --- /dev/null +++ b/plugins/analysis-ukrainian/build.gradle @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +esplugin { + description 'The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into elasticsearch.' + classname 'org.elasticsearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin' +} + +dependencies { + compile "org.apache.lucene:lucene-analyzers-morfologik:${versions.lucene}" + compile "org.carrot2:morfologik-stemming:2.1.0" + compile "org.carrot2:morfologik-fsa:2.1.0" +} + +dependencyLicenses { + mapping from: /lucene-.*/, to: 'lucene' + mapping from: /morfologik-.*/, to: 'lucene' +} + +thirdPartyAudit.excludes = [ + // we don't use the morfologik-fsa polish stemmer + 'morfologik.stemming.polish.PolishStemmer' +] diff --git a/plugins/analysis-ukrainian/licenses/lucene-LICENSE.txt b/plugins/analysis-ukrainian/licenses/lucene-LICENSE.txt new file mode 100644 index 00000000000..28b134f5f8e --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-LICENSE.txt @@ -0,0 +1,475 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from unicode conversion examples available at +http://www.unicode.org/Public/PROGRAMS/CVTUTF. Here is the copyright +from those sources: + +/* + * Copyright 2001-2004 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. 
hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. + */ + + +Some code in core/src/java/org/apache/lucene/util/ArrayUtil.java was +derived from Python 2.4.2 sources available at +http://www.python.org. Full license is here: + + http://www.python.org/download/releases/2.4.2/license/ + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from Python 3.1.2 sources available at +http://www.python.org. Full license is here: + + http://www.python.org/download/releases/3.1.2/license/ + +Some code in core/src/java/org/apache/lucene/util/automaton was +derived from Brics automaton sources available at +www.brics.dk/automaton/. Here is the copyright from those sources: + +/* + * Copyright (c) 2001-2009 Anders Moeller + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +The levenshtein automata tables in core/src/java/org/apache/lucene/util/automaton +were automatically generated with the moman/finenight FSA package. +Here is the copyright for those sources: + +# Copyright (c) 2010, Jean-Philippe Barrette-LaPierre, +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from ICU (http://www.icu-project.org) +The full license is available here: + http://source.icu-project.org/repos/icu/icu/trunk/license.html + +/* + * Copyright (C) 1999-2010, International Business Machines + * Corporation and others. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, and/or sell copies of the + * Software, and to permit persons to whom the Software is furnished to do so, + * provided that the above copyright notice(s) and this permission notice appear + * in all copies of the Software and that both the above copyright notice(s) and + * this permission notice appear in supporting documentation. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE + * LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR + * ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER + * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Except as contained in this notice, the name of a copyright holder shall not + * be used in advertising or otherwise to promote the sale, use or other + * dealings in this Software without prior written authorization of the + * copyright holder. + */ + +The following license applies to the Snowball stemmers: + +Copyright (c) 2001, Dr Martin Porter +Copyright (c) 2002, Richard Boulton +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The following license applies to the KStemmer: + +Copyright © 2003, +Center for Intelligent Information Retrieval, +University of Massachusetts, Amherst. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. The names "Center for Intelligent Information Retrieval" and +"University of Massachusetts" must not be used to endorse or promote products +derived from this software without prior written permission. To obtain +permission, contact info@ciir.cs.umass.edu. + +THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +The following license applies to the Morfologik project: + +Copyright (c) 2006 Dawid Weiss +Copyright (c) 2007-2011 Dawid Weiss, Marcin Miłkowski +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Morfologik nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--- + +The dictionary comes from Morfologik project. Morfologik uses data from +Polish ispell/myspell dictionary hosted at http://www.sjp.pl/slownik/en/ and +is licenced on the terms of (inter alia) LGPL and Creative Commons +ShareAlike. The part-of-speech tags were added in Morfologik project and +are not found in the data from sjp.pl. The tagset is similar to IPI PAN +tagset. + +--- + +The following license applies to the Morfeusz project, +used by org.apache.lucene.analysis.morfologik. + +BSD-licensed dictionary of Polish (SGJP) +http://sgjp.pl/morfeusz/ + +Copyright © 2011 Zygmunt Saloni, Włodzimierz Gruszczyński, + Marcin Woliński, Robert Wołosz + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/plugins/analysis-ukrainian/licenses/lucene-NOTICE.txt b/plugins/analysis-ukrainian/licenses/lucene-NOTICE.txt new file mode 100644 index 00000000000..ecf08201a5e --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-NOTICE.txt @@ -0,0 +1,191 @@ +Apache Lucene +Copyright 2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +Includes software from other Apache Software Foundation projects, +including, but not limited to: + - Apache Ant + - Apache Jakarta Regexp + - Apache Commons + - Apache Xerces + +ICU4J, (under analysis/icu) is licensed under an MIT styles license +and Copyright (c) 1995-2008 International Business Machines Corporation and others + +Some data files (under analysis/icu/src/data) are derived from Unicode data such +as the Unicode Character Database. See http://unicode.org/copyright.html for more +details. + +Brics Automaton (under core/src/java/org/apache/lucene/util/automaton) is +BSD-licensed, created by Anders Møller. 
See http://www.brics.dk/automaton/ + +The levenshtein automata tables (under core/src/java/org/apache/lucene/util/automaton) were +automatically generated with the moman/finenight FSA library, created by +Jean-Philippe Barrette-LaPierre. This library is available under an MIT license, +see http://sites.google.com/site/rrettesite/moman and +http://bitbucket.org/jpbarrette/moman/overview/ + +The class org.apache.lucene.util.WeakIdentityMap was derived from +the Apache CXF project and is Apache License 2.0. + +The Google Code Prettify is Apache License 2.0. +See http://code.google.com/p/google-code-prettify/ + +JUnit (junit-4.10) is licensed under the Common Public License v. 1.0 +See http://junit.sourceforge.net/cpl-v10.html + +This product includes code (JaspellTernarySearchTrie) from Java Spelling Checkin +g Package (jaspell): http://jaspell.sourceforge.net/ +License: The BSD License (http://www.opensource.org/licenses/bsd-license.php) + +The snowball stemmers in + analysis/common/src/java/net/sf/snowball +were developed by Martin Porter and Richard Boulton. +The snowball stopword lists in + analysis/common/src/resources/org/apache/lucene/analysis/snowball +were developed by Martin Porter and Richard Boulton. +The full snowball package is available from + http://snowball.tartarus.org/ + +The KStem stemmer in + analysis/common/src/org/apache/lucene/analysis/en +was developed by Bob Krovetz and Sergio Guzman-Lara (CIIR-UMass Amherst) +under the BSD-license. + +The Arabic,Persian,Romanian,Bulgarian, and Hindi analyzers (common) come with a default +stopword list that is BSD-licensed created by Jacques Savoy. These files reside in: +analysis/common/src/resources/org/apache/lucene/analysis/ar/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/fa/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/ro/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/bg/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/hi/stopwords.txt +See http://members.unine.ch/jacques.savoy/clef/index.html. + +The German,Spanish,Finnish,French,Hungarian,Italian,Portuguese,Russian and Swedish light stemmers +(common) are based on BSD-licensed reference implementations created by Jacques Savoy and +Ljiljana Dolamic. These files reside in: +analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java + +The Stempel analyzer (stempel) includes BSD-licensed software developed +by the Egothor project http://egothor.sf.net/, created by Leo Galambos, Martin Kvapil, +and Edmond Nolan. + +The Polish analyzer (stempel) comes with a default +stopword list that is BSD-licensed created by the Carrot2 project. 
The file resides +in stempel/src/resources/org/apache/lucene/analysis/pl/stopwords.txt. +See http://project.carrot2.org/license.html. + +The SmartChineseAnalyzer source code (smartcn) was +provided by Xiaoping Gao and copyright 2009 by www.imdict.net. + +WordBreakTestUnicode_*.java (under modules/analysis/common/src/test/) +is derived from Unicode data such as the Unicode Character Database. +See http://unicode.org/copyright.html for more details. + +The Morfologik analyzer (morfologik) includes BSD-licensed software +developed by Dawid Weiss and Marcin Miłkowski (http://morfologik.blogspot.com/). + +Morfologik uses data from Polish ispell/myspell dictionary +(http://www.sjp.pl/slownik/en/) licenced on the terms of (inter alia) +LGPL and Creative Commons ShareAlike. + +Morfologic includes data from BSD-licensed dictionary of Polish (SGJP) +(http://sgjp.pl/morfeusz/) + +Servlet-api.jar and javax.servlet-*.jar are under the CDDL license, the original +source code for this can be found at http://www.eclipse.org/jetty/downloads.php + +=========================================================================== +Kuromoji Japanese Morphological Analyzer - Apache Lucene Integration +=========================================================================== + +This software includes a binary and/or source version of data from + + mecab-ipadic-2.7.0-20070801 + +which can be obtained from + + http://atilika.com/releases/mecab-ipadic/mecab-ipadic-2.7.0-20070801.tar.gz + +or + + http://jaist.dl.sourceforge.net/project/mecab/mecab-ipadic/2.7.0-20070801/mecab-ipadic-2.7.0-20070801.tar.gz + +=========================================================================== +mecab-ipadic-2.7.0-20070801 Notice +=========================================================================== + +Nara Institute of Science and Technology (NAIST), +the copyright holders, disclaims all warranties with regard to this +software, including all implied warranties of merchantability and +fitness, in no event shall NAIST be liable for +any special, indirect or consequential damages or any damages +whatsoever resulting from loss of use, data or profits, whether in an +action of contract, negligence or other tortuous action, arising out +of or in connection with the use or performance of this software. + +A large portion of the dictionary entries +originate from ICOT Free Software. The following conditions for ICOT +Free Software applies to the current dictionary as well. + +Each User may also freely distribute the Program, whether in its +original form or modified, to any third party or parties, PROVIDED +that the provisions of Section 3 ("NO WARRANTY") will ALWAYS appear +on, or be attached to, the Program, which is distributed substantially +in the same form as set out herein and that such intended +distribution, if actually made, will neither violate or otherwise +contravene any of the laws and regulations of the countries having +jurisdiction over the User or the intended distribution itself. + +NO WARRANTY + +The program was produced on an experimental basis in the course of the +research and development conducted during the project and is provided +to users as so produced on an experimental basis. Accordingly, the +program is provided without any warranty whatsoever, whether express, +implied, statutory or otherwise. 
The term "warranty" used herein +includes, but is not limited to, any warranty of the quality, +performance, merchantability and fitness for a particular purpose of +the program and the nonexistence of any infringement or violation of +any right of any third party. + +Each user of the program will agree and understand, and be deemed to +have agreed and understood, that there is no warranty whatsoever for +the program and, accordingly, the entire risk arising from or +otherwise connected with the program is assumed by the user. + +Therefore, neither ICOT, the copyright holder, or any other +organization that participated in or was otherwise related to the +development of the program and their respective officials, directors, +officers and other employees shall be held liable for any and all +damages, including, without limitation, general, special, incidental +and consequential damages, arising out of or otherwise in connection +with the use or inability to use the program or any product, material +or result produced or otherwise obtained by using the program, +regardless of whether they have been advised of, or otherwise had +knowledge of, the possibility of such damages at any time during the +project or thereafter. Each user will be deemed to have agreed to the +foregoing by his or her commencement of use of the program. The term +"use" as used herein includes, but is not limited to, the use, +modification, copying and distribution of the program and the +production of secondary products from the program. + +In the case where the program, whether in its original form or +modified, was distributed or delivered to or received by a user from +any person, organization or entity other than ICOT, unless it makes or +grants independently of ICOT any specific warranty to the user in +writing, such person, organization or entity, will also be exempted +from and not be held liable to the user for any such damages as noted +above as far as the program is concerned. 
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.3.0-snapshot-a66a445.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.3.0-snapshot-a66a445.jar.sha1 new file mode 100644 index 00000000000..5ad5644d679 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.3.0-snapshot-a66a445.jar.sha1 @@ -0,0 +1 @@ +7e711a007cd1588f8118eb02803381d448ae087c \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.0.jar.sha1 new file mode 100644 index 00000000000..88f43752dba --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/morfologik-fsa-2.1.0.jar.sha1 @@ -0,0 +1 @@ +88e5993f73c102f378c711f6e47221b7a9e22d25 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.0.jar.sha1 new file mode 100644 index 00000000000..ec449346c7b --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/morfologik-stemming-2.1.0.jar.sha1 @@ -0,0 +1 @@ +94167b64752138a246cc33cbf1a3b0bfe5274b7c \ No newline at end of file diff --git a/plugins/analysis-ukrainian/src/main/java/org/elasticsearch/index/analysis/UkrainianAnalyzerProvider.java b/plugins/analysis-ukrainian/src/main/java/org/elasticsearch/index/analysis/UkrainianAnalyzerProvider.java new file mode 100644 index 00000000000..45bf27b954b --- /dev/null +++ b/plugins/analysis-ukrainian/src/main/java/org/elasticsearch/index/analysis/UkrainianAnalyzerProvider.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.uk.UkrainianMorfologikAnalyzer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider;
+import org.elasticsearch.index.analysis.Analysis;
+
+public class UkrainianAnalyzerProvider extends AbstractIndexAnalyzerProvider<UkrainianMorfologikAnalyzer> {
+
+    private final UkrainianMorfologikAnalyzer analyzer;
+
+    public UkrainianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
+        super(indexSettings, name, settings);
+        analyzer = new UkrainianMorfologikAnalyzer(Analysis.parseStopWords(env, settings, UkrainianMorfologikAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer.setVersion(version);
+    }
+
+    @Override
+    public UkrainianMorfologikAnalyzer get() {
+        return this.analyzer;
+    }
+
+}
diff --git a/plugins/analysis-ukrainian/src/main/java/org/elasticsearch/plugin/analysis/ukrainian/AnalysisUkrainianPlugin.java b/plugins/analysis-ukrainian/src/main/java/org/elasticsearch/plugin/analysis/ukrainian/AnalysisUkrainianPlugin.java
new file mode 100644
index 00000000000..ff8425e201c
--- /dev/null
+++ b/plugins/analysis-ukrainian/src/main/java/org/elasticsearch/plugin/analysis/ukrainian/AnalysisUkrainianPlugin.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugin.analysis.ukrainian;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.elasticsearch.index.analysis.AnalyzerProvider;
+import org.elasticsearch.index.analysis.UkrainianAnalyzerProvider;
+import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
+import org.elasticsearch.plugins.AnalysisPlugin;
+import org.elasticsearch.plugins.Plugin;
+
+import java.util.Map;
+
+import static java.util.Collections.singletonMap;
+
+public class AnalysisUkrainianPlugin extends Plugin implements AnalysisPlugin {
+
+    @Override
+    public Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() {
+        return singletonMap("ukrainian", UkrainianAnalyzerProvider::new);
+    }
+}
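Since the provider and plugin above do nothing more than wire Lucene's UkrainianMorfologikAnalyzer into the analysis registry, a small standalone sketch may help illustrate what the registered analyzer does with input text. This is illustrative only and not part of the change; it assumes lucene-analyzers-morfologik is on the classpath and mirrors the construction used in UkrainianAnalyzerProvider (default stop set, no stem exclusions):

    import org.apache.lucene.analysis.CharArraySet;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.uk.UkrainianMorfologikAnalyzer;

    public class UkrainianAnalyzerSketch {
        public static void main(String[] args) throws Exception {
            // Same construction as the provider: default Ukrainian stop set, no stem exclusions.
            UkrainianMorfologikAnalyzer analyzer = new UkrainianMorfologikAnalyzer(
                    UkrainianMorfologikAnalyzer.getDefaultStopSet(), CharArraySet.EMPTY_SET);
            try (TokenStream ts = analyzer.tokenStream("field", "рухається по колу")) {
                CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
                ts.reset();
                while (ts.incrementToken()) {
                    // Prints dictionary stems, e.g. "рухатися" for "рухається"; an ambiguous
                    // token such as "колу" can yield several stems at the same position.
                    System.out.println(term.toString());
                }
                ts.end();
            }
            analyzer.close();
        }
    }

The multiple stems per position are exactly what the 10_basic.yaml REST test below asserts: three tokens for "колу", all sharing the same start and end offsets.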
diff --git a/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/SimpleUkrainianAnalyzerTests.java b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/SimpleUkrainianAnalyzerTests.java
new file mode 100644
index 00000000000..6dbc37ea4ab
--- /dev/null
+++ b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/SimpleUkrainianAnalyzerTests.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class SimpleUkrainianAnalyzerTests extends ESTestCase {
+
+    public void testBasicUsage() throws Exception {
+        testAnalyzer("чергу", "черга");
+        testAnalyzer("рухається", "рухатися");
+        testAnalyzer("колу", "кола", "коло", "кіл");
+        testAnalyzer("Ця п'єса у свою чергу рухається по колу.", "п'єса", "черга", "рухатися", "кола", "коло", "кіл");
+    }
+
+    private static void testAnalyzer(String source, String... expectedTerms) throws IOException {
+        TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY, new AnalysisUkrainianPlugin());
+        Analyzer analyzer = analysis.indexAnalyzers.get("ukrainian").analyzer();
+        TokenStream ts = analyzer.tokenStream("test", source);
+        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
+        ts.reset();
+        for (String expected : expectedTerms) {
+            assertThat(ts.incrementToken(), equalTo(true));
+            assertThat(term.toString(), equalTo(expected));
+        }
+        assertThat(ts.incrementToken(), equalTo(false));
+    }
+
+}
diff --git a/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianAnalysisTests.java b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianAnalysisTests.java
new file mode 100644
index 00000000000..a45549c22bd
--- /dev/null
+++ b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianAnalysisTests.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.uk.UkrainianMorfologikAnalyzer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin;
+import org.elasticsearch.test.ESTestCase;
+import org.hamcrest.MatcherAssert;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+public class UkrainianAnalysisTests extends ESTestCase {
+
+    public void testDefaultsUkrainianAnalysis() throws IOException {
+        final TestAnalysis analysis = createTestAnalysis(new Index("test", "_na_"), Settings.EMPTY,
+                new AnalysisUkrainianPlugin());
+
+        Analyzer analyzer = analysis.indexAnalyzers.get("ukrainian").analyzer();
+        MatcherAssert.assertThat(analyzer, instanceOf(UkrainianMorfologikAnalyzer.class));
+    }
+}
diff --git a/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianClientYamlTestSuiteIT.java b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianClientYamlTestSuiteIT.java
new file mode 100644
index 00000000000..590d3614b97
--- /dev/null
+++ b/plugins/analysis-ukrainian/src/test/java/org/elasticsearch/index/analysis/UkrainianClientYamlTestSuiteIT.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+import org.elasticsearch.test.rest.yaml.parser.ClientYamlTestParseException;
+
+import java.io.IOException;
+
+public class UkrainianClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
+
+    public UkrainianClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws IOException, ClientYamlTestParseException {
+        return ESClientYamlSuiteTestCase.createParameters(0, 1);
+    }
+}
+
diff --git a/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/10_basic.yaml b/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/10_basic.yaml
new file mode 100644
index 00000000000..48d513c140c
--- /dev/null
+++ b/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/10_basic.yaml
@@ -0,0 +1,18 @@
+# Integration tests for Ukrainian analyzer
+---
+"Analyzer":
+  - do:
+      indices.analyze:
+        body:
+          text: колу
+          analyzer: ukrainian
+  - length: { tokens: 3 }
+  - match: { tokens.0.token: кола }
+  - match: { tokens.0.start_offset: 0 }
+  - match: { tokens.0.end_offset: 4 }
+  - match: { tokens.1.token: коло }
+  - match: { tokens.1.start_offset: 0 }
+  - match: { tokens.1.end_offset: 4 }
+  - match: { tokens.2.token: кіл }
+  - match: { tokens.2.start_offset: 0 }
+  - match: { tokens.2.end_offset: 4 }
diff --git a/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/20_search.yaml b/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/20_search.yaml
new file mode 100644
index 00000000000..34d8fd2fde7
--- /dev/null
+++ b/plugins/analysis-ukrainian/src/test/resources/rest-api-spec/test/analysis_ukrainian/20_search.yaml
@@ -0,0 +1,32 @@
+# Integration tests for the Ukrainian analysis component
+#
+---
+"Index Ukrainian content":
+  - do:
+      indices.create:
+        index: test
+        body:
+          mappings:
+            type:
+              properties:
+                text:
+                  type: text
+                  analyzer: ukrainian
+
+  - do:
+      index:
+        index: test
+        type: type
+        id: 1
+        body: { "text": "Ця п'єса у свою чергу рухається по колу." }
+  - do:
+      indices.refresh: {}
+
+  - do:
+      search:
+        index: test
+        body:
+          query:
+            match:
+              text: кола
+  - match: { hits.total: 1 }
diff --git a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java
index e10fdb72ff7..72930344bbf 100644
--- a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java
+++ b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java
@@ -32,6 +32,7 @@ import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.DocumentMapperParser;
 import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.indices.mapper.MapperRegistry;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -42,6 +43,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.function.Supplier;
 
 import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
 import static org.hamcrest.Matchers.containsString;
@@ -58,8 +60,11 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase {
         mapperRegistry = new MapperRegistry(
             Collections.singletonMap(Murmur3FieldMapper.CONTENT_TYPE, new Murmur3FieldMapper.TypeParser()),
             Collections.emptyMap());
+        Supplier<QueryShardContext> queryShardContext = () -> {
+            return indexService.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); });
+        };
         parser = new DocumentMapperParser(indexService.getIndexSettings(), indexService.mapperService(),
-            indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, indexService::newQueryShardContext);
+            indexService.getIndexAnalyzers(), indexService.similarityService(), mapperRegistry, queryShardContext);
     }
 
     @Override
@@ -152,8 +157,11 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase {
         Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
         IndexService indexService2x = createIndex("test_old", oldIndexSettings);
 
+        Supplier<QueryShardContext> queryShardContext = () -> {
+            return indexService2x.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); });
+        };
         DocumentMapperParser parser = new DocumentMapperParser(indexService2x.getIndexSettings(), indexService2x.mapperService(),
             indexService2x.getIndexAnalyzers(),
-            indexService2x.similarityService(), mapperRegistry, indexService2x::newQueryShardContext);
+            indexService2x.similarityService(), mapperRegistry, queryShardContext);
 
         DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));
         assertEquals(mapping, defaultMapper.mappingSource().string());
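The change above swaps the `indexService::newQueryShardContext` method reference for an explicit `Supplier` lambda because `newQueryShardContext` now takes arguments. A minimal, self-contained sketch of the same deferred-construction pattern follows; all names in it are illustrative, not from the Elasticsearch API:

    import java.util.function.Supplier;

    public class SupplierSketch {
        static String buildContext(int shardId) {
            System.out.println("context built for shard " + shardId);
            return "ctx-" + shardId;
        }

        public static void main(String[] args) {
            // The lambda captures the call and its arguments, but buildContext does
            // not run until get() is invoked by whoever was handed the Supplier.
            Supplier<String> lazyContext = () -> buildContext(0);
            System.out.println("supplier created, nothing built yet");
            System.out.println(lazyContext.get()); // construction happens here
        }
    }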
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java
index 51b5eae57ae..ba2011c276e 100644
--- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java
@@ -43,7 +43,7 @@ import java.util.concurrent.ConcurrentHashMap;
  */
 public class AzureStorageServiceMock extends AbstractComponent implements AzureStorageService {
 
-    protected Map<String, ByteArrayOutputStream> blobs = new ConcurrentHashMap<>();
+    protected final Map<String, ByteArrayOutputStream> blobs = new ConcurrentHashMap<>();
 
     public AzureStorageServiceMock() {
         super(Settings.EMPTY);
@@ -94,7 +94,7 @@ public class AzureStorageServiceMock extends AbstractComponent implements AzureS
     @Override
     public Map<String, BlobMetaData> listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) {
         MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder();
-        for (String blobName : blobs.keySet()) {
+        blobs.forEach((String blobName, ByteArrayOutputStream bos) -> {
             final String checkBlob;
             if (keyPath != null && !keyPath.isEmpty()) {
                 // strip off key path from the beginning of the blob name
@@ -103,9 +103,9 @@ public class AzureStorageServiceMock extends AbstractComponent implements AzureS
                 checkBlob = blobName;
             }
             if (prefix == null || startsWithIgnoreCase(checkBlob, prefix)) {
-                blobsBuilder.put(blobName, new PlainBlobMetaData(checkBlob, blobs.get(blobName).size()));
+                blobsBuilder.put(blobName, new PlainBlobMetaData(checkBlob, bos.size()));
             }
-        }
+        });
         return blobsBuilder.immutableMap();
     }
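The forEach rewrite above receives each entry's value directly, so every blob is looked up once instead of twice (the old loop called blobs.get(blobName) inside the body). A tiny self-contained illustration of the same pattern, using hypothetical data rather than anything from the change:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class ForEachSketch {
        public static void main(String[] args) {
            Map<String, byte[]> blobs = new ConcurrentHashMap<>();
            blobs.put("path/a", new byte[16]);
            blobs.put("path/b", new byte[32]);

            // Key and value arrive together, so no second blobs.get(key) lookup is
            // needed, and ConcurrentHashMap.forEach tolerates concurrent updates.
            blobs.forEach((key, value) ->
                    System.out.println(key + " has " + value.length + " bytes"));
        }
    }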
diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java
index 34e4d78f8cf..7310b527158 100644
--- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java
+++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java
@@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.monitor.jvm.JvmInfo;
 import org.elasticsearch.repositories.RepositoryException;
 import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
 
@@ -101,14 +102,27 @@ public class S3Repository extends BlobStoreRepository {
          */
         Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING =
             Setting.boolSetting("repositories.s3.server_side_encryption", false, Property.NodeScope);
+
+        /**
+         * Default is to use 100MB (S3 defaults) for heaps above 2GB and 5% of
+         * the available memory for smaller heaps.
+         */
+        ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(
+            Math.max(
+                ByteSizeUnit.MB.toBytes(5), // minimum value
+                Math.min(
+                    ByteSizeUnit.MB.toBytes(100),
+                    JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 20)),
+            ByteSizeUnit.BYTES);
+
         /**
          * repositories.s3.buffer_size: Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold,
          * the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and
          * to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevents the
-         * use of the Multipart API and may result in upload errors. Defaults to 100m.
+         * use of the Multipart API and may result in upload errors. Defaults to the minimum between 100MB and 5% of the heap size.
          */
         Setting<ByteSizeValue> BUFFER_SIZE_SETTING =
-            Setting.byteSizeSetting("repositories.s3.buffer_size", new ByteSizeValue(100, ByteSizeUnit.MB),
+            Setting.byteSizeSetting("repositories.s3.buffer_size", DEFAULT_BUFFER_SIZE,
                 new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB), Property.NodeScope);
         /**
          * repositories.s3.max_retries: Number of retries in case of S3 errors. Defaults to 3.
@@ -195,12 +209,13 @@ public class S3Repository extends BlobStoreRepository {
          * @see Repositories#SERVER_SIDE_ENCRYPTION_SETTING
          */
         Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false);
+
         /**
          * buffer_size
          * @see Repositories#BUFFER_SIZE_SETTING
          */
         Setting<ByteSizeValue> BUFFER_SIZE_SETTING =
-            Setting.byteSizeSetting("buffer_size", new ByteSizeValue(100, ByteSizeUnit.MB),
+            Setting.byteSizeSetting("buffer_size", Repositories.DEFAULT_BUFFER_SIZE,
                 new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));
         /**
          * max_retries
diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java
index 915183888b6..14595d13448 100644
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java
+++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java
@@ -32,6 +32,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.repositories.RepositoryException;
 import org.elasticsearch.test.ESTestCase;
+import org.hamcrest.Matchers;
 
 import static org.elasticsearch.repositories.s3.S3Repository.Repositories;
 import static org.elasticsearch.repositories.s3.S3Repository.Repository;
@@ -117,4 +118,12 @@ public class S3RepositoryTests extends ESTestCase {
         assertEquals("foo/bar/", s3repo.basePath().buildAsString()); // make sure leading `/` is removed and trailing is added
     }
 
+    public void testDefaultBufferSize() {
+        ByteSizeValue defaultBufferSize = S3Repository.Repository.BUFFER_SIZE_SETTING.get(Settings.EMPTY);
+        assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(new ByteSizeValue(100, ByteSizeUnit.MB)));
+        assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(new ByteSizeValue(5, ByteSizeUnit.MB)));
+
+        ByteSizeValue defaultNodeBufferSize = S3Repository.Repositories.BUFFER_SIZE_SETTING.get(Settings.EMPTY);
+        assertEquals(defaultBufferSize, defaultNodeBufferSize);
+    }
 }
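Since DEFAULT_BUFFER_SIZE above is computed once from JvmInfo at class-initialization time, it may help to see the clamp evaluated for concrete heap sizes. The following is a standalone sketch of the same max(5MB, min(100MB, heap/20)) formula, with the heap passed in as a plain argument instead of read from JvmInfo; the figures in the comments follow directly from the arithmetic:

    public class BufferSizeSketch {
        static long defaultBufferSizeBytes(long heapBytes) {
            long floor = 5L * 1024 * 1024;      // 5 MB minimum, required by the S3 multipart API
            long ceiling = 100L * 1024 * 1024;  // 100 MB maximum, matching the S3 default
            return Math.max(floor, Math.min(ceiling, heapBytes / 20)); // 5% of the heap in between
        }

        public static void main(String[] args) {
            long mb = 1024L * 1024;
            long gb = 1024L * mb;
            System.out.println(defaultBufferSizeBytes(512 * mb)); // 512 MB heap -> ~25.6 MB
            System.out.println(defaultBufferSizeBytes(1 * gb));   // 1 GB heap   -> ~51.2 MB
            System.out.println(defaultBufferSizeBytes(4 * gb));   // 4 GB heap   -> 100 MB (capped)
            System.out.println(defaultBufferSizeBytes(64 * mb));  // 64 MB heap  -> 5 MB (floor)
        }
    }

This matches the bounds asserted by testDefaultBufferSize above: the default always lands between the 5MB multipart minimum and the 100MB S3 default.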
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java
index ab4f00492b0..0078c61898d 100644
--- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java
@@ -20,6 +20,7 @@ package org.elasticsearch.plugins;
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.cli.ExitCodes;
 import org.elasticsearch.cli.MockTerminal;
 import org.elasticsearch.cli.UserException;
 import org.elasticsearch.common.settings.Settings;
@@ -27,7 +28,9 @@ import org.elasticsearch.env.Environment;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.Before;
 
+import java.io.BufferedReader;
 import java.io.IOException;
+import java.io.StringReader;
 import java.nio.file.DirectoryStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -43,6 +46,7 @@ public class RemovePluginCommandTests extends ESTestCase {
     private Path home;
     private Environment env;
 
+    @Override
     @Before
     public void setUp() throws Exception {
         super.setUp();
@@ -130,8 +134,24 @@ public class RemovePluginCommandTests extends ESTestCase {
         assertThat(terminal.getOutput(), not(containsString(expectedConfigDirPreservedMessage(configDir))));
     }
 
+    public void testRemoveUninstalledPluginErrors() throws Exception {
+        UserException e = expectThrows(UserException.class, () -> removePlugin("fake", home));
+        assertEquals(ExitCodes.CONFIG, e.exitCode);
+        assertEquals("plugin fake not found; run 'elasticsearch-plugin list' to get list of installed plugins", e.getMessage());
+
+        MockTerminal terminal = new MockTerminal();
+        new RemovePluginCommand().main(new String[] { "-Epath.home=" + home, "fake" }, terminal);
+        try (BufferedReader reader = new BufferedReader(new StringReader(terminal.getOutput()))) {
+            assertEquals("-> Removing fake...", reader.readLine());
+            assertEquals("ERROR: plugin fake not found; run 'elasticsearch-plugin list' to get list of installed plugins",
+                reader.readLine());
+            assertNull(reader.readLine());
+        }
+    }
+
     private String expectedConfigDirPreservedMessage(final Path configDir) {
         return "-> Preserving plugin config files [" + configDir + "] in case of upgrade, delete manually if not needed";
     }
 
 }
+
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
index fd54c5fadbe..34621802f55 100644
--- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
@@ -69,14 +69,14 @@ public class TribeUnitTests extends ESTestCase {
             .build();
 
         final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(MockTcpTransportPlugin.class, MockZenPing.TestPlugin.class);
-        tribe1 = new TribeClientNode(
+        tribe1 = new MockNode(
             Settings.builder()
                 .put(baseSettings)
                 .put("cluster.name", "tribe1")
                 .put("node.name", "tribe1_node")
                 .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong())
                 .build(), mockPlugins).start();
-        tribe2 = new TribeClientNode(
+        tribe2 = new MockNode(
             Settings.builder()
                 .put(baseSettings)
                 .put("cluster.name", "tribe2")
diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle
index bc27de7f624..f263adbadc8 100644
--- a/qa/vagrant/build.gradle
+++ b/qa/vagrant/build.gradle
@@ -90,7 +90,14 @@ configurations {
 }
 
 repositories {
-  mavenCentral()
+  mavenCentral() // Try maven central first, it'll have releases before 5.0.0
+  /* Setup a repository that tries to download from
+    https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext]
+    which should work for 5.0.0+.
This isn't a real ivy repository but gradle + is fine with that */ + ivy { + artifactPattern "https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext]" + } } dependencies { @@ -125,10 +132,7 @@ Set getVersions() { new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s -> xml = new XmlParser().parse(s) } - - // List all N-1 releases from maven central - int major = Integer.parseInt(project.version.substring(0, project.version.indexOf('.'))) - 1 - Set versions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /$major\.\d\.\d/ }) + Set versions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /[5]\.\d\.\d/ }) if (versions.isEmpty() == false) { return versions; } @@ -151,9 +155,10 @@ task verifyPackagingTestUpgradeFromVersions { String maybeUpdateFromVersions = System.getProperty("tests.packaging.upgrade.from.versions", null) if (maybeUpdateFromVersions == null) { Set versions = getVersions() - Set actualVersions = new HashSet<>(Arrays.asList(upgradeFromVersions)) + Set actualVersions = new TreeSet<>(Arrays.asList(upgradeFromVersions)) if (!versions.equals(actualVersions)) { - throw new GradleException("out-of-date versions [" + actualVersions + "], expected [" + versions + "]; run gradle updatePackagingTestUpgradeFromVersions") + throw new GradleException("out-of-date versions " + actualVersions + + ", expected " + versions + "; run gradle updatePackagingTestUpgradeFromVersions") } } } @@ -247,7 +252,7 @@ for (String box : availableBoxes) { Task up = tasks.create("vagrant${boxTask}#up", VagrantCommandTask) { boxName box - /* Its important that we try to reprovision the box even if it already + /* It's important that we try to reprovision the box even if it already exists. That way updates to the vagrant configuration take automatically. That isn't to say that the updates will always be compatible. Its ok to just destroy the boxes if they get busted but that is a manual step diff --git a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats index 7f9ce21e85d..83c12f960e5 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats @@ -73,6 +73,13 @@ setup() { verify_archive_installation } +@test "[TAR] verify elasticsearch-plugin list runs without any plugins installed" { + # previously this would fail because the archive installations did + # not create an empty plugins directory + local plugins_list=`$ESHOME/bin/elasticsearch-plugin list` + [[ -z $plugins_list ]] +} + @test "[TAR] elasticsearch fails if java executable is not found" { local JAVA=$(which java) diff --git a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats index d0361339bf6..d435a76b9c7 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats @@ -74,6 +74,11 @@ setup() { verify_package_installation } +@test "[DEB] verify elasticsearch-plugin list runs without any plugins installed" { + local plugins_list=`$ESHOME/bin/elasticsearch-plugin list` + [[ -z $plugins_list ]] +} + @test "[DEB] elasticsearch isn't started by package install" { # Wait a second to give Elasticsearch a change to start if it is going to. 
# This isn't perfect by any means but its something. diff --git a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats index 5535d1a67ce..b6ec78509d1 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats @@ -73,6 +73,11 @@ setup() { verify_package_installation } +@test "[RPM] verify elasticsearch-plugin list runs without any plugins installed" { + local plugins_list=`$ESHOME/bin/elasticsearch-plugin list` + [[ -z $plugins_list ]] +} + @test "[RPM] elasticsearch isn't started by package install" { # Wait a second to give Elasticsearch a change to start if it is going to. # This isn't perfect by any means but its something. diff --git a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash index d5dc095e70f..b979f40e309 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/module_and_plugin_test_cases.bash @@ -205,6 +205,10 @@ fi install_and_check_plugin analysis stempel } +@test "[$GROUP] install ukrainian plugin" { + install_and_check_plugin analysis ukrainian morfologik-fsa-*.jar morfologik-stemming-*.jar +} + @test "[$GROUP] install gce plugin" { install_and_check_plugin discovery gce google-api-client-*.jar } @@ -341,6 +345,10 @@ fi remove_plugin analysis-stempel } +@test "[$GROUP] remove ukrainian plugin" { + remove_plugin analysis-ukrainian +} + @test "[$GROUP] remove gce plugin" { remove_plugin discovery-gce } @@ -428,38 +436,21 @@ fi sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output # exclude progress line local loglines=$(cat /tmp/plugin-cli-output | grep -v "^[[:cntrl:]]" | wc -l) - if [ "$GROUP" == "TAR PLUGINS" ]; then - # tar extraction does not create the plugins directory so the plugin tool will print an additional line that the directory will be created - [ "$loglines" -eq "3" ] || { - echo "Expected 3 lines excluding progress bar but the output had $loglines lines and was:" - cat /tmp/plugin-cli-output - false - } - else - [ "$loglines" -eq "2" ] || { - echo "Expected 2 lines excluding progress bar but the output had $loglines lines and was:" - cat /tmp/plugin-cli-output - false - } - fi + [ "$loglines" -eq "2" ] || { + echo "Expected 2 lines excluding progress bar but the output had $loglines lines and was:" + cat /tmp/plugin-cli-output + false + } remove_jvm_example local relativePath=${1:-$(readlink -m jvm-example-*.zip)} sudo -E -u $ESPLUGIN_COMMAND_USER ES_JAVA_OPTS="-Des.logger.level=DEBUG" "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output local loglines=$(cat /tmp/plugin-cli-output | grep -v "^[[:cntrl:]]" | wc -l) - if [ "$GROUP" == "TAR PLUGINS" ]; then - [ "$loglines" -gt "3" ] || { - echo "Expected more than 3 lines excluding progress bar but the output had $loglines lines and was:" - cat /tmp/plugin-cli-output - false - } - else - [ "$loglines" -gt "2" ] || { - echo "Expected more than 2 lines excluding progress bar but the output had $loglines lines and was:" - cat /tmp/plugin-cli-output - false - } - fi + [ "$loglines" -gt "2" ] || { + echo "Expected more than 2 lines excluding progress bar but the output had $loglines lines and was:" + cat 
/tmp/plugin-cli-output + false + } remove_jvm_example } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash b/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash index 02cae2aeecb..55e7fdfc484 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash @@ -129,20 +129,28 @@ install_and_check_plugin() { shift if [ "$prefix" == "-" ]; then - local fullName="$name" + local full_name="$name" else - local fullName="$prefix-$name" + local full_name="$prefix-$name" fi - install_jvm_plugin $fullName "$(readlink -m $fullName-*.zip)" + install_jvm_plugin $full_name "$(readlink -m $full_name-*.zip)" - assert_module_or_plugin_directory "$ESPLUGINS/$fullName" + assert_module_or_plugin_directory "$ESPLUGINS/$full_name" + # analysis plugins have a corresponding analyzers jar if [ $prefix == 'analysis' ]; then - assert_module_or_plugin_file "$ESPLUGINS/$fullName/lucene-analyzers-$name-*.jar" + local analyzer_jar_suffix=$name + # the name of the analyzer jar for the ukrainian plugin does + # not match the name of the plugin, so we have to make an + # exception + if [ $name == 'ukrainian' ]; then + analyzer_jar_suffix='morfologik' + fi + assert_module_or_plugin_file "$ESPLUGINS/$full_name/lucene-analyzers-$analyzer_jar_suffix-*.jar" fi for file in "$@"; do - assert_module_or_plugin_file "$ESPLUGINS/$fullName/$file" + assert_module_or_plugin_file "$ESPLUGINS/$full_name/$file" done } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash index 3d6210a2ea3..b5edebaf41c 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash @@ -89,6 +89,7 @@ verify_archive_installation() { assert_file "$ESCONFIG/elasticsearch.yml" f elasticsearch elasticsearch 660 assert_file "$ESCONFIG/jvm.options" f elasticsearch elasticsearch 660 assert_file "$ESCONFIG/log4j2.properties" f elasticsearch elasticsearch 660 + assert_file "$ESPLUGINS" d elasticsearch elasticsearch 755 assert_file "$ESHOME/lib" d elasticsearch elasticsearch 755 assert_file "$ESHOME/NOTICE.txt" f elasticsearch elasticsearch 644 assert_file "$ESHOME/LICENSE.txt" f elasticsearch elasticsearch 644 diff --git a/qa/vagrant/versions b/qa/vagrant/versions index 654a95a3a25..0062ac97180 100644 --- a/qa/vagrant/versions +++ b/qa/vagrant/versions @@ -1 +1 @@ -6.0.0-alpha1-SNAPSHOT +5.0.0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json index c4d0dcd5f49..1b3c1266a63 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json @@ -12,6 +12,10 @@ "type" : "string", "description" : "a short version of the Accept header, e.g. 
json, yaml" }, + "full_id": { + "type" : "boolean", + "description" : "Return the full node ID instead of the shortened version (default: false)" + }, "local": { "type" : "boolean", "description" : "Return local information, do not retrieve the state from master node (default: false)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json index f1a6a98217c..06828a6588a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json @@ -41,7 +41,7 @@ "wait_for_events": { "type" : "enum", "options" : ["immediate", "urgent", "high", "normal", "low", "languid"], - "description" : "Wait until all currently queued events with the given priorty are processed" + "description" : "Wait until all currently queued events with the given priority are processed" }, "wait_for_no_relocating_shards": { "type" : "boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json index 9048f982712..0e2697cd524 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json @@ -67,10 +67,6 @@ "lenient": { "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" - }, - "lowercase_expanded_terms": { - "type" : "boolean", - "description" : "Specify whether query terms should be lowercased" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index f97492aa7ab..39998fb87fe 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -65,10 +65,6 @@ "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" }, - "lowercase_expanded_terms": { - "type" : "boolean", - "description" : "Specify whether query terms should be lowercased" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json index 328794ffdd0..0f0d8c132b3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json @@ -49,10 +49,6 @@ "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" }, - "lowercase_expanded_terms": { - "type" : "boolean", - "description" : "Specify whether query terms should be lowercased" - }, "parent": { "type" : "string", "description" : "The ID of the parent document" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json index f83cf255165..1e2413ee723 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json @@ -12,6 +12,10 @@ } }, "params": { + "allow_no_indices": { + "type" : "boolean", + 
"description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" + }, "expand_wildcards": { "type" : "enum", "options" : ["open","closed","none","all"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json index 98af689833a..7a0977da194 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json @@ -63,10 +63,6 @@ "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" }, - "lowercase_expanded_terms": { - "type" : "boolean", - "description" : "Specify whether query terms should be lowercased" - }, "rewrite": { "type": "boolean", "description": "Provide a more detailed explanation showing the actual Lucene query that will be executed." diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 5aa7a409a06..2cf359ede16 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -72,10 +72,6 @@ "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" }, - "lowercase_expanded_terms": { - "type" : "boolean", - "description" : "Specify whether query terms should be lowercased" - }, "preference": { "type" : "string", "description" : "Specify the node or shard the operation should be performed on (default: random)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 4b9e76ac59f..afa2a79570c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -65,10 +65,6 @@ "type" : "boolean", "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored" }, - "lowercase_expanded_terms": { - "type" : "boolean", - "description" : "Specify whether query terms should be lowercased" - }, "pipeline": { "type" : "string", "description" : "Ingest pipeline to set on index requests made by this action. 
(default: none)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yaml index 63670061b6d..fc7eb456892 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yaml @@ -219,6 +219,10 @@ --- "Alias sorting": + - skip: + version: " - 5.0.99" + reason: sorting was introduced in 5.1.0 + - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml index 0b3cdba46a4..3e900132273 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yaml @@ -160,6 +160,10 @@ --- "Test cat indices sort": + - skip: + version: " - 5.0.99" + reason: sorting was introduced in 5.1.0 + - do: indices.create: index: foo diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml index 391a7c1e6d1..9f4de56c863 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yaml @@ -57,3 +57,28 @@ - match: $body: | /^ http \n ((\d{1,3}\.){3}\d{1,3}:\d{1,5}\n)+ $/ + +--- +"Test cat nodes output with full_id set": + - skip: + version: " - 5.0.0" + reason: The full_id setting was rejected in 5.0.0 see #21266 + + + - do: + cat.nodes: + h: id + # check for a 4 char non-whitespace character string + - match: + $body: | + /^(\S{4}\n)+$/ + + - do: + cat.nodes: + h: id + full_id: true + # check for a 5+ char non-whitespace character string + - match: + $body: | + /^(\S{5,}\n)+$/ + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yaml index 2345df9732e..6d83274726e 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yaml @@ -46,7 +46,9 @@ --- "Test cat repositories sort": - + - skip: + version: " - 5.0.99" + reason: sorting was introduced in 5.1.0 - do: snapshot.create_repository: repository: test_cat_repo_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml index fc077ba6529..5529d4f5799 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml @@ -227,6 +227,9 @@ --- "Test cat shards sort": + - skip: + version: " - 5.0.99" + reason: sorting was introduced in 5.1.0 - do: indices.create: @@ -253,11 +256,11 @@ - do: cat.shards: - h: [index, store] - s: [store] + h: [index, docs] + s: [docs] - - match: + - match: # don't use the store here it's cached and might be stale $body: | - /^ foo \s+ (\d+|\d+[.]\d+)(kb|b)\n - bar \s+ (\d+|\d+[.]\d+)(kb|b)\n + /^ foo \s+ 0\n + bar \s+ 1\n $/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml index 
28a7e1f3bee..eb651f6b157 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml @@ -1,6 +1,8 @@ --- "Help": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: cat.templates: help: true @@ -15,7 +17,9 @@ --- "No templates": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: cat.templates: {} @@ -26,7 +30,9 @@ --- "Normal templates": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: indices.put_template: name: test @@ -72,7 +78,9 @@ --- "Filtered templates": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: indices.put_template: name: test @@ -111,7 +119,9 @@ --- "Column headers": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: indices.put_template: name: test @@ -145,7 +155,9 @@ --- "Select columns": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: indices.put_template: name: test @@ -176,7 +188,9 @@ --- "Sort templates": - + - skip: + version: " - 5.0.99" + reason: templates were introduced in 5.1.0 - do: indices.put_template: name: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/count/20_query_string.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/count/20_query_string.yaml index 933033761e9..70f402691a3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/count/20_query_string.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/count/20_query_string.yaml @@ -58,15 +58,6 @@ count: index: test q: field:BA* - lowercase_expanded_terms: false - - - match: {count : 0} - - - do: - count: - index: test - q: field:BA* - analyze_wildcard: true - match: {count : 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yaml index 30fe6cc55b6..78ef8c4bc89 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/30_query_string.yaml @@ -68,17 +68,6 @@ type: test id: 1 q: field:BA* - lowercase_expanded_terms: false - - - is_false: matched - - - do: - explain: - index: test - type: test - id: 1 - q: field:BA* - analyze_wildcard: true - is_true: matched diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml index e255ce510ed..c6631b83b18 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yaml @@ -86,3 +86,4 @@ id: 1 version: 1 version_type: external_gte + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yaml index e986d3e931a..98af719bbe0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yaml @@ -71,6 +71,22 @@ - match: { hits.total: 1 } - match: { hits.hits.0._index: "logs-000002"} +--- +"Rollover no condition matched": + - skip: + version: " - 5.0.0" + reason: bug fixed in 5.0.1 + + # create index with alias + - do: + indices.create: 
+ index: logs-1 + wait_for_active_shards: 1 + body: + aliases: + logs_index: {} + logs_search: {} + # run again and verify results without rolling over - do: indices.rollover: @@ -78,11 +94,11 @@ wait_for_active_shards: 1 body: conditions: - max_docs: 100 + max_docs: 1 - - match: { old_index: logs-000002 } - - match: { new_index: logs-000003 } + - match: { old_index: logs-1 } + - match: { new_index: logs-000002 } - match: { rolled_over: false } - match: { dry_run: false } - - match: { conditions: { "[max_docs: 100]": false } } + - match: { conditions: { "[max_docs: 1]": false } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml index 9569728ce7d..62a75b0ff04 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yaml @@ -1,15 +1,26 @@ --- "Shrink index via API": - # creates an index with one document. - # relocates all it's shards to one node - # shrinks it into a new index with a single shard + # creates an index with one document solely allocated on the master node + # and shrinks it into a new index with a single shard + # we don't relocate the shards to a single node after the index is created + # here because in a mixed-version cluster we can't identify + # which node has the highest version, and only that node + # can safely be used to shrink the index. + - do: + cluster.state: {} + # Get master node id + + - set: { master_node: master } + - do: indices.create: index: source wait_for_active_shards: 1 body: settings: - number_of_replicas: "0" + # ensure everything is allocated on a single node + index.routing.allocation.include._id: $master + number_of_replicas: 0 - do: index: index: source @@ -28,18 +39,11 @@ - match: { _id: "1" } - match: { _source: { foo: "hello world" } } - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - # relocate everything to the master node and make it read-only + # make it read-only - do: indices.put_settings: index: source body: - index.routing.allocation.include._id: $master index.blocks.write: true index.number_of_replicas: 0 @@ -47,8 +51,6 @@ cluster.health: wait_for_status: green index: source - wait_for_no_relocating_shards: true - wait_for_events: "languid" # now we do the actual shrink - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yaml index e696a5600bc..9d2245b4b40 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yaml @@ -9,14 +9,80 @@ index: number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - do: indices.upgrade: index: test_index - match: {upgraded_indices.test_index.oldest_lucene_segment_version: '/(\d\.)+\d/'} - is_true: upgraded_indices.test_index.upgrade_version + +--- +"Upgrade indices ignore unavailable": + - skip: + version: " - 5.0.0" + reason: ignore_unavailable was added as a bugfix in 5.0.1 see #21281 + + - do: + indices.create: + index: test_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + indices.upgrade: + index: ["does_not_exist", "test_index"] + ignore_unavailable: true + + - match: {_shards.total: 1} + 
- is_true: upgraded_indices.test_index.upgrade_version + - is_false: upgraded_indices.does_not_exist + +--- +"Upgrade indices allow no indices": + + - skip: + version: " - 5.0.0" + reason: ignore_unavailable was added as a bugfix in 5.0.1 see #21281 + + - do: + indices.upgrade: + index: test_index + ignore_unavailable: true + allow_no_indices: true + + - match: {_shards.total: 0} + +--- +"Upgrade indices disallow no indices": + + - skip: + version: " - 5.0.0" + reason: ignore_unavailable was added as a bugfix in 5.0.1 see #21281 + + - do: + catch: missing + indices.upgrade: + index: test_index + ignore_unavailable: true + allow_no_indices: false + +--- +"Upgrade indices disallow unavailable": + + - skip: + version: " - 5.0.0" + reason: ignore_unavailable was added as a bugfix in 5.0.1 see #21281 + + - do: + indices.create: + index: test_index + + - do: + catch: missing + indices.upgrade: + index: ["test_index", "does_not_exist"] + ignore_unavailable: false + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string.yaml index c6dd323aa6c..3f96009c12a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string.yaml @@ -49,15 +49,6 @@ indices.validate_query: index: test q: field:BA* - lowercase_expanded_terms: false - - - is_true: valid - - - do: - indices.validate_query: - index: test - q: field:BA* - analyze_wildcard: true - is_true: valid diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yaml new file mode 100644 index 00000000000..2152e75f7e6 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yaml @@ -0,0 +1,80 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + post: + properties: + mentions: + type: keyword + user: + properties: + notifications: + type: keyword + + - do: + index: + index: test + type: test + id: foo|bar|baz0 + body: { "notifications" : ["abc"] } + + - do: + index: + index: test + type: test + id: foo|bar|baz1 + body: { "mentions" : ["abc"] } + + - do: + indices.refresh: {} + +--- +"Filter aggs with terms lookup ensure not cached": + - skip: + version: " - 5.0.0" + reason: This uses filter aggs that need rewriting; this was fixed in 5.0.1 + + - do: + search: + size: 0 + request_cache: true + body: {"aggs": { "itemsNotify": { "filter": { "terms": { "mentions": { "index": "test", "type": "test", "id": "foo|bar|baz0", "path": "notifications"}}}, "aggs": { "mentions" : {"terms" : { "field" : "mentions" }}}}}} + + # validate result + - match: { hits.total: 2 } + - match: { aggregations.itemsNotify.doc_count: 1 } + - length: { aggregations.itemsNotify.mentions.buckets: 1 } + - match: { aggregations.itemsNotify.mentions.buckets.0.key: "abc" } + # we are using a lookup - this should not cache + - do: + indices.stats: { index: test, metric: request_cache} + - match: { _shards.total: 1 } + - match: { _all.total.request_cache.hit_count: 0 } + - match: { _all.total.request_cache.miss_count: 0 } + - is_true: indices.test + +--- +"Filter aggs no lookup and ensure it's cached": + # now run without lookup and ensure we get cached or at least
do the lookup + - do: + search: + size: 0 + request_cache: true + body: {"aggs": { "itemsNotify": { "filter": { "terms": { "mentions": ["abc"]}}, "aggs": { "mentions" : {"terms" : { "field" : "mentions" }}}}}} + + - match: { hits.total: 2 } + - match: { aggregations.itemsNotify.doc_count: 1 } + - length: { aggregations.itemsNotify.mentions.buckets: 1 } + - match: { aggregations.itemsNotify.mentions.buckets.0.key: "abc" } + - do: + indices.stats: { index: test, metric: request_cache} + - match: { _shards.total: 1 } + - match: { _all.total.request_cache.hit_count: 0 } + - match: { _all.total.request_cache.miss_count: 1 } + - is_true: indices.test + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/60_query_string.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/60_query_string.yaml index 6fb93bb1044..8533cfd2668 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/60_query_string.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/60_query_string.yaml @@ -58,15 +58,6 @@ search: index: test q: field:BA* - lowercase_expanded_terms: false - - - match: {hits.total: 0} - - - do: - search: - index: test - q: field:BA* - analyze_wildcard: true - match: {hits.total: 1} diff --git a/settings.gradle b/settings.gradle index 4f249de4391..eda0ec8658a 100644 --- a/settings.gradle +++ b/settings.gradle @@ -37,6 +37,7 @@ List projects = [ 'plugins:analysis-phonetic', 'plugins:analysis-smartcn', 'plugins:analysis-stempel', + 'plugins:analysis-ukrainian', 'plugins:discovery-azure-classic', 'plugins:discovery-ec2', 'plugins:discovery-file', diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index 739371d76df..47051d9072d 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -146,7 +146,7 @@ public abstract class ESAllocationTestCase extends ESTestCase { ClusterState lastClusterState; do { lastClusterState = clusterState; - logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint()); + logger.debug("ClusterState: {}", clusterState.getRoutingNodes()); clusterState = service.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); } while (lastClusterState.equals(clusterState) == false); return clusterState; diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java index 3be4cd3edf4..efdf10d5a5d 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java +++ b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java @@ -19,7 +19,7 @@ package org.elasticsearch.ingest; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; @@ -41,7 +41,7 @@ public final class RandomDocumentPicks { * path to refer to a field name using the dot notation. 
*/ public static String randomFieldName(Random random) { - int numLevels = RandomInts.randomIntBetween(random, 1, 5); + int numLevels = RandomNumbers.randomIntBetween(random, 1, 5); String fieldName = ""; for (int i = 0; i < numLevels; i++) { if (i > 0) { @@ -169,7 +169,7 @@ public final class RandomDocumentPicks { } private static Object randomFieldValue(Random random, int currentDepth) { - switch(RandomInts.randomIntBetween(random, 0, 9)) { + switch(RandomNumbers.randomIntBetween(random, 0, 9)) { case 0: return randomString(random); case 1: @@ -180,28 +180,28 @@ public final class RandomDocumentPicks { return random.nextDouble(); case 4: List stringList = new ArrayList<>(); - int numStringItems = RandomInts.randomIntBetween(random, 1, 10); + int numStringItems = RandomNumbers.randomIntBetween(random, 1, 10); for (int j = 0; j < numStringItems; j++) { stringList.add(randomString(random)); } return stringList; case 5: List intList = new ArrayList<>(); - int numIntItems = RandomInts.randomIntBetween(random, 1, 10); + int numIntItems = RandomNumbers.randomIntBetween(random, 1, 10); for (int j = 0; j < numIntItems; j++) { intList.add(random.nextInt()); } return intList; case 6: List booleanList = new ArrayList<>(); - int numBooleanItems = RandomInts.randomIntBetween(random, 1, 10); + int numBooleanItems = RandomNumbers.randomIntBetween(random, 1, 10); for (int j = 0; j < numBooleanItems; j++) { booleanList.add(random.nextBoolean()); } return booleanList; case 7: List doubleList = new ArrayList<>(); - int numDoubleItems = RandomInts.randomIntBetween(random, 1, 10); + int numDoubleItems = RandomNumbers.randomIntBetween(random, 1, 10); for (int j = 0; j < numDoubleItems; j++) { doubleList.add(random.nextDouble()); } @@ -211,7 +211,7 @@ public final class RandomDocumentPicks { addRandomFields(random, newNode, ++currentDepth); return newNode; case 9: - byte[] byteArray = new byte[RandomInts.randomIntBetween(random, 1, 1024)]; + byte[] byteArray = new byte[RandomNumbers.randomIntBetween(random, 1, 1024)]; random.nextBytes(byteArray); return byteArray; default: @@ -230,7 +230,7 @@ public final class RandomDocumentPicks { if (currentDepth > 5) { return; } - int numFields = RandomInts.randomIntBetween(random, 1, 10); + int numFields = RandomNumbers.randomIntBetween(random, 1, 10); for (int i = 0; i < numFields; i++) { String fieldName = randomLeafFieldName(random); Object fieldValue = randomFieldValue(random, currentDepth); diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index 6eb28bea14a..38e8a8436b1 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -24,6 +24,8 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.discovery.zen.UnicastHostsProvider; +import org.elasticsearch.discovery.zen.ZenPing; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.recovery.RecoverySettings; @@ -33,6 +35,7 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.fetch.FetchPhase; +import 
org.elasticsearch.test.discovery.MockZenPing; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; @@ -96,6 +99,21 @@ public class MockNode extends Node { } } + @Override + protected ZenPing newZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, + UnicastHostsProvider hostsProvider) { + if (getPluginsService().filterPlugins(MockZenPing.TestPlugin.class).isEmpty()) { + return super.newZenPing(settings, threadPool, transportService, hostsProvider); + } else { + return new MockZenPing(settings); + } + } + + @Override + protected Node newTribeClientNode(Settings settings, Collection> classpathPlugins) { + return new MockNode(settings, classpathPlugins); + } + @Override protected void processRecoverySettings(ClusterSettings clusterSettings, RecoverySettings recoverySettings) { if (false == getPluginsService().filterPlugins(RecoverySettingsChunkSizePlugin.class).isEmpty()) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index bc02f20197e..e65e0ab4de7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -52,11 +52,13 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -123,10 +125,12 @@ import java.util.concurrent.ExecutionException; import static java.util.Collections.emptyList; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; public abstract class AbstractQueryTestCase> extends ESTestCase { @@ -155,6 +159,10 @@ public abstract class AbstractQueryTestCase> private static String[] currentTypes; private static String[] randomTypes; + /** + * used to check warning headers of the deprecation logger + */ + private ThreadContext threadContext; protected static Index getIndex() { return index; @@ -209,6 +217,20 @@ public abstract class AbstractQueryTestCase> serviceHolder = new ServiceHolder(nodeSettings, indexSettings, getPlugins(), this); } serviceHolder.clientInvocationHandler.delegate = this; + this.threadContext = new ThreadContext(Settings.EMPTY); + DeprecationLogger.setThreadContext(threadContext); + } + + /** + * Check that there are no unaccounted 
warning headers. These should be checked with {@link #checkWarningHeaders(String...)} in the + * appropriate test + */ + @After + public void teardown() throws IOException { + final List warnings = threadContext.getResponseHeaders().get(DeprecationLogger.DEPRECATION_HEADER); + assertNull("unexpected warning headers", warnings); + DeprecationLogger.removeThreadContext(this.threadContext); + this.threadContext.close(); } private static SearchContext getSearchContext(String[] types, QueryShardContext context) { @@ -1009,6 +1031,23 @@ public abstract class AbstractQueryTestCase> return query; } + protected void checkWarningHeaders(String... messages) { + final List warnings = threadContext.getResponseHeaders().get(DeprecationLogger.DEPRECATION_HEADER); + assertThat(warnings, hasSize(messages.length)); + for (String msg : messages) { + assertThat(warnings, hasItem(equalTo(msg))); + } + // "clear" current warning headers by setting a new ThreadContext + DeprecationLogger.removeThreadContext(this.threadContext); + try { + this.threadContext.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + this.threadContext = new ThreadContext(Settings.EMPTY); + DeprecationLogger.setThreadContext(this.threadContext); + } + private static class ServiceHolder implements Closeable { private final IndicesQueriesRegistry indicesQueriesRegistry; diff --git a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java index 3c5f105e4d1..b739099cff0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java +++ b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java @@ -18,7 +18,7 @@ package org.elasticsearch.test;/* */ import com.carrotsearch.randomizedtesting.RandomizedTest; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -208,10 +208,10 @@ public class BackgroundIndexer implements AutoCloseable { } private XContentBuilder generateSource(long id, Random random) throws IOException { - int contentLength = RandomInts.randomIntBetween(random, minFieldSize, maxFieldSize); + int contentLength = RandomNumbers.randomIntBetween(random, minFieldSize, maxFieldSize); StringBuilder text = new StringBuilder(contentLength); while (text.length() < contentLength) { - int tokenLength = RandomInts.randomIntBetween(random, 1, Math.min(contentLength - text.length(), 10)); + int tokenLength = RandomNumbers.randomIntBetween(random, 1, Math.min(contentLength - text.length(), 10)); text.append(" ").append(RandomStrings.randomRealisticUnicodeOfCodepointLength(random, tokenLength)); } XContentBuilder builder = XContentFactory.smileBuilder(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java index f247c56636c..3e3896dfc2c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java @@ -34,6 +34,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.concurrent.CountDownLatch; import 
static junit.framework.TestCase.fail; @@ -53,12 +54,12 @@ public class ClusterServiceUtils { clusterService.setLocalNode(localNode); clusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { @Override - public void connectToAddedNodes(ClusterChangedEvent event) { + public void connectToNodes(List addedNodes) { // skip } @Override - public void disconnectFromRemovedNodes(ClusterChangedEvent event) { + public void disconnectFromNodes(List removedNodes) { // skip } }); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 1f2f01cc352..7f0af14f93a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -20,7 +20,7 @@ package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.annotations.TestGroup; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.http.HttpHost; import org.apache.lucene.util.IOUtils; @@ -164,6 +164,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BooleanSupplier; import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.client.Requests.syncedFlushRequest; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; @@ -433,7 +435,7 @@ public abstract class ESIntegTestCase extends ESTestCase { if (randomBoolean()) { // keep this low so we don't stall tests - builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), RandomInts.randomIntBetween(random, 1, 15) + "ms"); + builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 1, 15) + "ms"); } return builder; @@ -446,8 +448,8 @@ public abstract class ESIntegTestCase extends ESTestCase { } switch (random.nextInt(4)) { case 3: - final int maxThreadCount = RandomInts.randomIntBetween(random, 1, 4); - final int maxMergeCount = RandomInts.randomIntBetween(random, maxThreadCount, maxThreadCount + 4); + final int maxThreadCount = RandomNumbers.randomIntBetween(random, 1, 4); + final int maxMergeCount = RandomNumbers.randomIntBetween(random, maxThreadCount, maxThreadCount + 4); builder.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), maxMergeCount); builder.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), maxThreadCount); break; @@ -458,7 +460,7 @@ public abstract class ESIntegTestCase extends ESTestCase { private static Settings.Builder setRandomIndexTranslogSettings(Random random, Settings.Builder builder) { if (random.nextBoolean()) { - builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 300), ByteSizeUnit.MB)); + builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(RandomNumbers.randomIntBetween(random, 1, 300), ByteSizeUnit.MB)); } if (random.nextBoolean()) { builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)); // just don't flush @@ -468,14 +470,14 @@ 
public abstract class ESIntegTestCase extends ESTestCase { } if (random.nextBoolean()) { - builder.put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), RandomInts.randomIntBetween(random, 100, 5000), TimeUnit.MILLISECONDS); + builder.put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 100, 5000), TimeUnit.MILLISECONDS); } return builder; } private TestCluster buildWithPrivateContext(final Scope scope, final long seed) throws Exception { - return RandomizedContext.current().runWithPrivateRandomness(new com.carrotsearch.randomizedtesting.Randomness(seed), new Callable() { + return RandomizedContext.current().runWithPrivateRandomness(seed, new Callable() { @Override public TestCluster call() throws Exception { return buildTestCluster(scope, seed); @@ -535,12 +537,11 @@ public abstract class ESIntegTestCase extends ESTestCase { for (Discovery discovery : internalCluster().getInstances(Discovery.class)) { if (discovery instanceof ZenDiscovery) { final ZenDiscovery zenDiscovery = (ZenDiscovery) discovery; - assertBusy(new Runnable() { - @Override - public void run() { - assertThat("still having pending states: " + Strings.arrayToDelimitedString(zenDiscovery.pendingClusterStates(), "\n"), - zenDiscovery.pendingClusterStates(), emptyArray()); - } + assertBusy(() -> { + final ClusterState[] states = zenDiscovery.pendingClusterStates(); + assertThat(zenDiscovery.localNode().getName() + " still having pending states:\n" + + Stream.of(states).map(ClusterState::toString).collect(Collectors.joining("\n")), + states, emptyArray()); }); } } @@ -574,7 +575,7 @@ public abstract class ESIntegTestCase extends ESTestCase { return Collections.emptySet(); } - protected void beforeIndexDeletion() { + protected void beforeIndexDeletion() throws IOException { cluster().beforeIndexDeletion(); } @@ -760,17 +761,14 @@ public abstract class ESIntegTestCase extends ESTestCase { */ public void waitNoPendingTasksOnAll() throws Exception { assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get()); - assertBusy(new Runnable() { - @Override - public void run() { - for (Client client : clients()) { - ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); - assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); - PendingClusterTasksResponse pendingTasks = client.admin().cluster().preparePendingClusterTasks().setLocal(true).get(); - assertThat("client " + client + " still has pending tasks " + pendingTasks.prettyPrint(), pendingTasks, Matchers.emptyIterable()); - clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); - assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); - } + assertBusy(() -> { + for (Client client : clients()) { + ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); + assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); + PendingClusterTasksResponse pendingTasks = client.admin().cluster().preparePendingClusterTasks().setLocal(true).get(); + assertThat("client " + client + " still has pending tasks " + pendingTasks, pendingTasks, Matchers.emptyIterable()); + clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); + assertThat("client " + client + " still has in flight fetch", 
clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); } }); assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get()); @@ -874,7 +872,8 @@ public abstract class ESIntegTestCase extends ESTestCase { ClusterHealthResponse actionGet = client().admin().cluster() .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForNoRelocatingShards(true)).actionGet(); if (actionGet.isTimedOut()) { - logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.info("ensureGreen timed out, cluster state:\n{}\n{}", + client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get()); fail("timed out waiting for green state"); } assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -901,7 +900,8 @@ public abstract class ESIntegTestCase extends ESTestCase { ClusterHealthResponse actionGet = client().admin().cluster() .health(request).actionGet(); if (actionGet.isTimedOut()) { - logger.info("waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.info("waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, + client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get()); assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false)); } if (status != null) { @@ -998,7 +998,8 @@ public abstract class ESIntegTestCase extends ESTestCase { ClusterHealthResponse actionGet = client().admin().cluster() .health(Requests.clusterHealthRequest(indices).waitForNoRelocatingShards(true).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet(); if (actionGet.isTimedOut()) { - logger.info("ensureYellow timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.info("ensureYellow timed out, cluster state:\n{}\n{}", + client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get()); assertThat("timed out waiting for yellow", actionGet.isTimedOut(), equalTo(false)); } logger.debug("indices {} are yellow", indices.length == 0 ? "[_all]" : indices); @@ -1009,7 +1010,8 @@ public abstract class ESIntegTestCase extends ESTestCase { * Prints the current cluster state as debug logging. */ public void logClusterState() { - logger.debug("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.debug("cluster state:\n{}\n{}", + client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get()); } /** @@ -1110,7 +1112,7 @@ public abstract class ESIntegTestCase extends ESTestCase { if (clusterHealthResponse.isTimedOut()) { ClusterStateResponse stateResponse = client(viaNode).admin().cluster().prepareState().get(); fail("failed to reach a stable cluster of [" + nodeCount + "] nodes. Tried via [" + viaNode + "]. 
last cluster state:\n" - + stateResponse.getState().prettyPrint()); + + stateResponse.getState()); } assertThat(clusterHealthResponse.isTimedOut(), is(false)); } @@ -1974,6 +1976,7 @@ public abstract class ESIntegTestCase extends ESTestCase { try { INSTANCE.printTestMessage("cleaning up after"); INSTANCE.afterInternal(true); + checkStaticState(); } finally { INSTANCE = null; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index f096e662f4a..9648eb5798e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -318,7 +318,8 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { ClusterHealthResponse actionGet = client().admin().cluster() .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForNoRelocatingShards(true)).actionGet(); if (actionGet.isTimedOut()) { - logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); + logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState(), + client().admin().cluster().preparePendingClusterTasks().get()); assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false)); } assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 9f6f17b80df..38eeb69d179 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -25,7 +25,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; @@ -237,6 +237,11 @@ public abstract class ESTestCase extends LuceneTestCase { @After public final void ensureCleanedUp() throws Exception { + checkStaticState(); + } + + // separate method so that this can be checked again after suite scoped cluster is shut down + protected static void checkStaticState() throws Exception { MockPageCacheRecycler.ensureAllPagesAreReleased(); MockBigArrays.ensureAllArraysAreReleased(); // field cache should NEVER get loaded. 
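The RandomInts to RandomNumbers switch that recurs throughout these test-framework files comes with the upgraded randomizedtesting dependency, where RandomNumbers supersedes the old RandomInts helper with the same inclusive-bounds semantics; call sites only change the class name. A minimal sketch of a migrated call site, assuming randomizedtesting 2.4+ on the classpath (the class and method names below are illustrative, not part of this patch):

    import com.carrotsearch.randomizedtesting.generators.RandomNumbers;

    import java.util.Random;

    class RandomNumbersMigrationSketch {
        // before the upgrade this read: RandomInts.randomIntBetween(random, 1, 5)
        static int randomLevelCount(Random random) {
            // bounds are inclusive, matching the behavior of the old RandomInts helper
            return RandomNumbers.randomIntBetween(random, 1, 5);
        }
    }
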
@@ -288,7 +293,7 @@ public abstract class ESTestCase extends LuceneTestCase { * @see #scaledRandomIntBetween(int, int) */ public static int randomIntBetween(int min, int max) { - return RandomInts.randomIntBetween(random(), min, max); + return RandomNumbers.randomIntBetween(random(), min, max); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index ef86c45b59f..6840eb7faec 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -21,7 +21,7 @@ package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.SeedUtils; import com.carrotsearch.randomizedtesting.SysGlobals; -import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.logging.log4j.Logger; @@ -31,8 +31,10 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; +import org.elasticsearch.action.support.replication.ReplicationTask; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterName; @@ -63,6 +65,8 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -87,6 +91,8 @@ import org.elasticsearch.node.service.NodeService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchService; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.MockTransportClient; @@ -249,7 +255,7 @@ public final class InternalTestCluster extends TestCluster { boolean useDedicatedMasterNodes = randomlyAddDedicatedMasters ? 
random.nextBoolean() : false; - this.numSharedDataNodes = RandomInts.randomIntBetween(random, minNumDataNodes, maxNumDataNodes); + this.numSharedDataNodes = RandomNumbers.randomIntBetween(random, minNumDataNodes, maxNumDataNodes); assert this.numSharedDataNodes >= 0; if (numSharedDataNodes == 0) { @@ -267,7 +273,7 @@ public final class InternalTestCluster extends TestCluster { this.numSharedDedicatedMasterNodes = 0; } if (numClientNodes < 0) { - this.numSharedCoordOnlyNodes = RandomInts.randomIntBetween(random, DEFAULT_MIN_NUM_CLIENT_NODES, DEFAULT_MAX_NUM_CLIENT_NODES); + this.numSharedCoordOnlyNodes = RandomNumbers.randomIntBetween(random, DEFAULT_MIN_NUM_CLIENT_NODES, DEFAULT_MAX_NUM_CLIENT_NODES); } else { this.numSharedCoordOnlyNodes = numClientNodes; } @@ -321,14 +327,14 @@ public final class InternalTestCluster extends TestCluster { // Some tests make use of scripting quite a bit, so increase the limit for integration tests builder.put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000); if (TEST_NIGHTLY) { - builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), RandomInts.randomIntBetween(random, 5, 10)); - builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), RandomInts.randomIntBetween(random, 5, 10)); + builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 5, 10)); + builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 5, 10)); } else if (random.nextInt(100) <= 90) { - builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), RandomInts.randomIntBetween(random, 2, 5)); - builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), RandomInts.randomIntBetween(random, 2, 5)); + builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 2, 5)); + builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 2, 5)); } // always reduce this - it can make tests really slow - builder.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 20, 50))); + builder.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), TimeValue.timeValueMillis(RandomNumbers.randomIntBetween(random, 20, 50))); defaultSettings = builder.build(); executor = EsExecutors.newScaling("test runner", 0, Integer.MAX_VALUE, 0, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory("test_" + clusterName), new ThreadContext(Settings.EMPTY)); } @@ -396,7 +402,7 @@ public final class InternalTestCluster extends TestCluster { } if (random.nextBoolean()) { - builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), new TimeValue(RandomInts.randomIntBetween(random, 10, 30), TimeUnit.SECONDS)); + builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), new TimeValue(RandomNumbers.randomIntBetween(random, 10, 30), TimeUnit.SECONDS)); } if (random.nextInt(10) == 0) { @@ -406,9 +412,9 @@ public final class 
InternalTestCluster extends TestCluster { if (random.nextBoolean()) { if (random.nextInt(10) == 0) { // do something crazy slow here - builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)); + builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomNumbers.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)); } else { - builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)); + builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomNumbers.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)); } } if (random.nextBoolean()) { @@ -417,21 +423,21 @@ public final class InternalTestCluster extends TestCluster { if (random.nextBoolean()) { if (random.nextInt(10) == 0) { // do something crazy slow here - builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)); + builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomNumbers.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)); } else { - builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)); + builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomNumbers.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)); } } if (random.nextBoolean()) { - builder.put(TcpTransport.PING_SCHEDULE.getKey(), RandomInts.randomIntBetween(random, 100, 2000) + "ms"); + builder.put(TcpTransport.PING_SCHEDULE.getKey(), RandomNumbers.randomIntBetween(random, 100, 2000) + "ms"); } if (random.nextBoolean()) { - builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING.getKey(), RandomInts.randomIntBetween(random, 0, 2000)); + builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING.getKey(), RandomNumbers.randomIntBetween(random, 0, 2000)); } if (random.nextBoolean()) { - builder.put(ScriptService.SCRIPT_CACHE_EXPIRE_SETTING.getKey(), TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 750, 10000000)).getStringRep()); + builder.put(ScriptService.SCRIPT_CACHE_EXPIRE_SETTING.getKey(), TimeValue.timeValueMillis(RandomNumbers.randomIntBetween(random, 750, 10000000)).getStringRep()); } return builder.build(); @@ -1013,7 +1019,7 @@ public final class InternalTestCluster extends TestCluster { } @Override - public void beforeIndexDeletion() { + public void beforeIndexDeletion() throws IOException { // Check that the operations counter on index shard has reached 0. // The assumption here is that after a test there are no ongoing write operations. 
// test that have ongoing write operations after the test (for example because ttl is used @@ -1048,13 +1054,30 @@ public final class InternalTestCluster extends TestCluster { } } - private void assertShardIndexCounter() { + private void assertShardIndexCounter() throws IOException { final Collection nodesAndClients = nodes.values(); for (NodeAndClient nodeAndClient : nodesAndClients) { IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { - assertThat("index shard counter on shard " + indexShard.shardId() + " on node " + nodeAndClient.name + " not 0", indexShard.getActiveOperationsCount(), equalTo(0)); + int activeOperationsCount = indexShard.getActiveOperationsCount(); + if (activeOperationsCount > 0) { + TaskManager taskManager = getInstance(TransportService.class, nodeAndClient.name).getTaskManager(); + DiscoveryNode localNode = getInstance(ClusterService.class, nodeAndClient.name).localNode(); + List taskInfos = taskManager.getTasks().values().stream() + .filter(task -> task instanceof ReplicationTask) + .map(task -> task.taskInfo(localNode, true)) + .collect(Collectors.toList()); + ListTasksResponse response = new ListTasksResponse(taskInfos, Collections.emptyList(), Collections.emptyList()); + XContentBuilder builder = XContentFactory.jsonBuilder() + .prettyPrint() + .startObject() + .value(response) + .endObject(); + throw new AssertionError("expected index shard counter on shard " + indexShard.shardId() + " on node " + + nodeAndClient.name + " to be 0 but was " + activeOperationsCount + ". Current replication tasks on node:\n" + + builder.string()); + } } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java index fe46251e3ee..7f43c9de61b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/OldIndexUtils.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.action.admin.indices.upgrade.get.IndexUpgradeStatus; import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.Loggers; @@ -56,7 +57,10 @@ import static junit.framework.TestCase.assertFalse; import static junit.framework.TestCase.assertTrue; import static org.elasticsearch.test.ESTestCase.randomInt; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; public class OldIndexUtils { @@ -103,10 +107,36 @@ public class OldIndexUtils { throw new IllegalStateException("Backwards index must contain exactly one cluster"); } - // the bwc scripts packs the indices under this path - Path src = list[0].resolve("nodes/0/indices/" + indexName); - assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src)); - copyIndex(logger, src, indexName, paths); + final Path src = getIndexDir(logger, indexName, indexFile, 
list[0]); + copyIndex(logger, src, src.getFileName().toString(), paths); + } + + public static Path getIndexDir( + final Logger logger, + final String indexName, + final String indexFile, + final Path dataDir) throws IOException { + final Version version = Version.fromString(indexName.substring("index-".length())); + if (version.before(Version.V_5_0_0_alpha1)) { + // the bwc scripts packs the indices under this path + Path src = dataDir.resolve("nodes/0/indices/" + indexName); + assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src)); + return src; + } else { + final List indexFolders = new ArrayList<>(); + try (DirectoryStream stream = Files.newDirectoryStream(dataDir.resolve("0/indices"), + (p) -> p.getFileName().toString().startsWith("extra") == false)) { // extra FS can break this... + for (final Path path : stream) { + indexFolders.add(path); + } + } + assertThat(indexFolders.toString(), indexFolders.size(), equalTo(1)); + final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, indexFolders.get(0)); + assertNotNull(indexMetaData); + assertThat(indexFolders.get(0).getFileName().toString(), equalTo(indexMetaData.getIndexUUID())); + assertThat(indexMetaData.getCreationVersion(), equalTo(version)); + return indexFolders.get(0); + } } public static void assertNotUpgraded(Client client, String... index) throws Exception { @@ -128,10 +158,10 @@ public class OldIndexUtils { } // randomly distribute the files from src over dests paths - public static void copyIndex(final Logger logger, final Path src, final String indexName, final Path... dests) throws IOException { + public static void copyIndex(final Logger logger, final Path src, final String folderName, final Path... dests) throws IOException { Path destinationDataPath = dests[randomInt(dests.length - 1)]; for (Path dest : dests) { - Path indexDir = dest.resolve(indexName); + Path indexDir = dest.resolve(folderName); assertFalse(Files.exists(indexDir)); Files.createDirectories(indexDir); } @@ -140,7 +170,7 @@ public class OldIndexUtils { public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException { Path relativeDir = src.relativize(dir); for (Path dest : dests) { - Path destDir = dest.resolve(indexName).resolve(relativeDir); + Path destDir = dest.resolve(folderName).resolve(relativeDir); Files.createDirectories(destDir); } return FileVisitResult.CONTINUE; @@ -155,7 +185,7 @@ public class OldIndexUtils { } Path relativeFile = src.relativize(file); - Path destFile = destinationDataPath.resolve(indexName).resolve(relativeFile); + Path destFile = destinationDataPath.resolve(folderName).resolve(relativeFile); logger.trace("--> Moving {} to {}", relativeFile, destFile); Files.move(file, destFile); assertFalse(Files.exists(file)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index 124960fe921..b960685777e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -82,7 +82,7 @@ public abstract class TestCluster implements Closeable { /** * Assertions that should run before the cluster is wiped should be called in this method */ - public void beforeIndexDeletion() { + public void beforeIndexDeletion() throws IOException { } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java 
b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index e4c0e1d5abc..1e86f940a11 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -53,7 +53,6 @@ import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; -import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; @@ -99,7 +98,7 @@ public class TestSearchContext extends SearchContext { this.fixedBitSetFilterCache = indexService.cache().bitsetFilterCache(); this.threadPool = threadPool; this.indexShard = indexService.getShardOrNull(0); - queryShardContext = indexService.newQueryShardContext(); + queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L); } public TestSearchContext(QueryShardContext queryShardContext) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java index 3e02b9de0fb..d5e7de1d9bf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/MockZenPing.java @@ -20,6 +20,7 @@ package org.elasticsearch.test.discovery; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -40,7 +41,10 @@ import java.util.stream.Collectors; * A {@link ZenPing} implementation which returns results based on an static in-memory map. This allows pinging * to be immediate and can be used to speed up tests. */ -public final class MockZenPing extends AbstractLifecycleComponent implements ZenPing { +public final class MockZenPing extends AbstractComponent implements ZenPing { + + /** A marker plugin used by {@link org.elasticsearch.node.MockNode} to indicate this mock zen ping should be used. 
*/ + public static class TestPlugin extends Plugin {} static final Map<ClusterName, Set<MockZenPing>> activeNodesPerCluster = ConcurrentCollections.newConcurrentMap(); @@ -52,8 +56,11 @@ public final class MockZenPing extends AbstractLifecycleComponent implements ZenPing } @Override - public void setPingContextProvider(PingContextProvider contextProvider) { + public void start(PingContextProvider contextProvider) { this.contextProvider = contextProvider; + assert contextProvider != null; + boolean added = getActiveNodesForCurrentCluster().add(this); + assert added; } @Override @@ -75,33 +82,14 @@ public final class MockZenPing extends AbstractLifecycleComponent implements ZenPing return new PingResponse(clusterState.nodes().getLocalNode(), clusterState.nodes().getMasterNode(), clusterState); } - @Override - protected void doStart() { - assert contextProvider != null; - boolean added = getActiveNodesForCurrentCluster().add(this); - assert added; - } - private Set<MockZenPing> getActiveNodesForCurrentCluster() { return activeNodesPerCluster.computeIfAbsent(getClusterName(), clusterName -> ConcurrentCollections.newConcurrentSet()); } @Override - protected void doStop() { + public void close() { boolean found = getActiveNodesForCurrentCluster().remove(this); assert found; } - - @Override - protected void doClose() { - - } - - public static class TestPlugin extends Plugin implements DiscoveryPlugin { - - public void onModule(DiscoveryModule discoveryModule) { - discoveryModule.addZenPing(MockZenPing.class); - } - } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java index f7094d8ae9f..de57eee6937 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java @@ -58,6 +58,14 @@ public class NetworkDisruption implements ServiceDisruptionScheme { this.networkLinkDisruptionType = networkLinkDisruptionType; } + public DisruptedLinks getDisruptedLinks() { + return disruptedLinks; + } + + public NetworkLinkDisruptionType getNetworkLinkDisruptionType() { + return networkLinkDisruptionType; + } + @Override public void applyToCluster(InternalTestCluster cluster) { this.cluster = cluster; @@ -143,6 +151,11 @@ public class NetworkDisruption implements ServiceDisruptionScheme { return (MockTransportService) cluster.getInstance(TransportService.class, node); } + @Override + public String toString() { + return "network disruption (disruption type: " + networkLinkDisruptionType + ", disrupted links: " + disruptedLinks + ")"; + } + /** * Represents a set of nodes with connections between nodes that are to be disrupted */ diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index 27756b1d852..da8c54396df 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -19,6 +19,7 @@ package org.elasticsearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; + import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; @@ -31,11 +32,13 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient;
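Conceptually, `MockZenPing` now registers itself eagerly in `start(...)` and deregisters in `close()`, with pings reduced to reads of a static per-cluster map. A self-contained sketch of that pattern, with illustrative names rather than the actual internals:

```java
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

final class InMemoryPingRegistrySketch {
    // one shared set of "live" instances per cluster name
    private static final Map<String, Set<InMemoryPingRegistrySketch>> ACTIVE = new ConcurrentHashMap<>();
    private final String clusterName;

    InMemoryPingRegistrySketch(String clusterName) {
        this.clusterName = clusterName;
    }

    void start() { // mirrors MockZenPing#start(PingContextProvider): eager registration
        ACTIVE.computeIfAbsent(clusterName, name -> ConcurrentHashMap.newKeySet()).add(this);
    }

    void close() { // mirrors MockZenPing#close(): other nodes stop "seeing" this one
        Set<InMemoryPingRegistrySketch> nodes = ACTIVE.get(clusterName);
        if (nodes != null) {
            nodes.remove(this);
        }
    }

    Set<InMemoryPingRegistrySketch> pingableNodes() { // immediate, no network I/O
        return ACTIVE.getOrDefault(clusterName, Collections.emptySet());
    }
}
```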
import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestPath; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; +import org.junit.BeforeClass; import java.io.IOException; import java.net.URI; @@ -58,6 +61,8 @@ public class ClientYamlTestClient { //query_string params that don't need to be declared in the spec, they are supported by default private static final Set<String> ALWAYS_ACCEPTED_QUERY_STRING_PARAMS = Sets.newHashSet("pretty", "source", "filter_path"); + private static boolean loggedInit = false; + private final ClientYamlSuiteRestSpec restSpec; private final RestClient restClient; private final Version esVersion; @@ -66,34 +71,84 @@ public class ClientYamlTestClient { assert hosts.size() > 0; this.restSpec = restSpec; this.restClient = restClient; - this.esVersion = readAndCheckVersion(hosts); + Tuple<Version, Version> versionTuple = readMasterAndMinNodeVersion(hosts.size()); + this.esVersion = versionTuple.v1(); + Version masterVersion = versionTuple.v2(); + if (false == loggedInit) { + /* This will be logged once per suite which lines up with randomized runner's dumping the output of all failing suites. It'd + * be super noisy to log this once per test. We can't log it in a @BeforeClass method because we need the class variables. */ + logger.info("initializing client, minimum es version: [{}] master version: [{}] hosts: {}", esVersion, masterVersion, hosts); + loggedInit = true; + } } - private Version readAndCheckVersion(List<HttpHost> hosts) throws IOException { + /** + * Reset {@link #loggedInit} so we log the connection setup before this suite.
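A compact sketch of the version derivation that `readMasterAndMinNodeVersion` (below) performs on the `GET /_cat/nodes?h=version,master` output. The one-line-per-node `"5.0.0 *"` response format is an assumption inferred from the parsing in the diff; the helper class is hypothetical.

```java
import org.elasticsearch.Version;
import org.elasticsearch.common.collect.Tuple;

final class CatNodesVersionParser {
    static Tuple<Version, Version> parse(String catNodesBody) {
        Version min = null;
        Version master = null;
        for (String line : catNodesBody.split("\n")) {
            String[] cols = line.split(" ");    // "version master", e.g. "5.0.0 *"
            Version nodeVersion = Version.fromString(cols[0]);
            if (cols[1].trim().equals("*")) {   // the elected master is marked with '*'
                master = nodeVersion;
            }
            if (min == null || min.onOrAfter(nodeVersion)) {
                min = nodeVersion;              // keep the lowest version seen
            }
        }
        return new Tuple<>(min, master);        // (minimum node version, master version)
    }
}
```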
+ */ + @BeforeClass + public static void clearLoggedInit() { + loggedInit = false; + } + + private Tuple<Version, Version> readMasterAndMinNodeVersion(int numHosts) throws IOException { + try { + // we simply go to the _cat/nodes API and parse all versions in the cluster + Response response = restClient.performRequest("GET", "/_cat/nodes", Collections.singletonMap("h", "version,master")); + ClientYamlTestResponse restTestResponse = new ClientYamlTestResponse(response); + String nodesCatResponse = restTestResponse.getBodyAsString(); + String[] split = nodesCatResponse.split("\n"); + Version version = null; + Version masterVersion = null; + for (String perNode : split) { + final String[] versionAndMaster = perNode.split(" "); + assert versionAndMaster.length == 2 : "invalid line: " + perNode + " length: " + versionAndMaster.length; + final Version currentVersion = Version.fromString(versionAndMaster[0]); + final boolean master = versionAndMaster[1].trim().equals("*"); + if (master) { + assert masterVersion == null; + masterVersion = currentVersion; + } + if (version == null) { + version = currentVersion; + } else if (version.onOrAfter(currentVersion)) { + version = currentVersion; + } + } + return new Tuple<>(version, masterVersion); + } catch (ResponseException ex) { + if (ex.getResponse().getStatusLine().getStatusCode() == 403) { + logger.warn("Fallback to simple info '/' request, _cat/nodes is not authorized"); + final Version version = readAndCheckVersion(numHosts); + return new Tuple<>(version, version); + } + throw ex; + } + } + + private Version readAndCheckVersion(int numHosts) throws IOException { ClientYamlSuiteRestApi restApi = restApi("info"); assert restApi.getPaths().size() == 1; assert restApi.getMethods().size() == 1; - - String version = null; - for (HttpHost ignored : hosts) { + Version version = null; + for (int i = 0; i < numHosts; i++) { //we don't really use the urls here, we rely on the client doing round-robin to touch all the nodes in the cluster String method = restApi.getMethods().get(0); String endpoint = restApi.getPaths().get(0); Response response = restClient.performRequest(method, endpoint); ClientYamlTestResponse restTestResponse = new ClientYamlTestResponse(response); + Object latestVersion = restTestResponse.evaluate("version.number"); if (latestVersion == null) { throw new RuntimeException("elasticsearch version not found in the response"); } + final Version currentVersion = Version.fromString(restTestResponse.evaluate("version.number").toString()); if (version == null) { - version = latestVersion.toString(); - } else { - if (!latestVersion.equals(version)) { - throw new IllegalArgumentException("provided nodes addresses run different elasticsearch versions"); - } + version = currentVersion; + } else if (version.onOrAfter(currentVersion)) { + version = currentVersion; } } - return Version.fromString(version); + return version; } public Version getEsVersion() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 2e29721f06e..f44558d7567 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -108,7 +108,7 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { for (String entry : blacklist) { this.blacklistPathMatchers.add(new
BlacklistedPathPatternMatcher(entry)); } - + } @Override @@ -267,27 +267,16 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { restTestExecutionContext.clear(); //skip test if the whole suite (yaml file) is disabled - assumeFalse(buildSkipMessage(testCandidate.getSuitePath(), testCandidate.getSetupSection().getSkipSection()), + assumeFalse(testCandidate.getSetupSection().getSkipSection().getSkipMessage(testCandidate.getSuitePath()), testCandidate.getSetupSection().getSkipSection().skip(restTestExecutionContext.esVersion())); //skip test if the teardown section of the suite is disabled - assumeFalse(buildSkipMessage(testCandidate.getSuitePath(), testCandidate.getTeardownSection().getSkipSection()), + assumeFalse(testCandidate.getTeardownSection().getSkipSection().getSkipMessage(testCandidate.getSuitePath()), testCandidate.getTeardownSection().getSkipSection().skip(restTestExecutionContext.esVersion())); //skip test if test section is disabled - assumeFalse(buildSkipMessage(testCandidate.getTestPath(), testCandidate.getTestSection().getSkipSection()), + assumeFalse(testCandidate.getTestSection().getSkipSection().getSkipMessage(testCandidate.getTestPath()), testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext.esVersion())); } - private static String buildSkipMessage(String description, SkipSection skipSection) { - StringBuilder messageBuilder = new StringBuilder(); - if (skipSection.isVersionCheck()) { - messageBuilder.append("[").append(description).append("] skipped, reason: [").append(skipSection.getReason()).append("] "); - } else { - messageBuilder.append("[").append(description).append("] skipped, reason: features ") - .append(skipSection.getFeatures()).append(" not supported"); - } - return messageBuilder.toString(); - } - public void test() throws IOException { //let's check that there is something to run, otherwise there might be a problem with the test section if (testCandidate.getTestSection().getExecutableSections().size() == 0) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSectionParser.java index b6e8ad6c0f4..b6b6adfd037 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSectionParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSectionParser.java @@ -36,16 +36,17 @@ public class ClientYamlTestSectionParser implements ClientYamlTestFragmentParser<ClientYamlTestSection> try { parser.nextToken(); testSection.setSkipSection(parseContext.parseSkipSection()); - + while ( parser.currentToken() != XContentParser.Token.END_ARRAY) { parseContext.advanceToFieldName(); testSection.addExecutableSection(parseContext.parseExecutableSection()); } - + parser.nextToken(); - assert parser.currentToken() == XContentParser.Token.END_OBJECT; + assert parser.currentToken() == XContentParser.Token.END_OBJECT : "malformed section [" + testSection.getName() + "] expected " + + XContentParser.Token.END_OBJECT + " but was " + parser.currentToken(); parser.nextToken(); - + return testSection; } catch (Exception e) { throw new ClientYamlTestParseException("Error parsing test named [" + testSection.getName() + "]", e); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParser.java index
31451dee247..b73edf7d2c6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParser.java @@ -70,9 +70,6 @@ public class SkipSectionParser implements ClientYamlTestFragmentParser<SkipSection> diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java private final List<String> features; private final String reason; - + private SkipSection() { this.lowerVersion = null; this.upperVersion = null; @@ -49,7 +49,6 @@ public class SkipSection { public SkipSection(String versionRange, List<String> features, String reason) { assert features != null; - assert versionRange != null && features.isEmpty() || versionRange == null && features.isEmpty() == false; Version[] versions = parseVersionRange(versionRange); this.lowerVersion = versions[0]; this.upperVersion = versions[1]; @@ -60,7 +59,7 @@ public class SkipSection { public Version getLowerVersion() { return lowerVersion; } - + public Version getUpperVersion() { return upperVersion; } @@ -77,11 +76,10 @@ public class SkipSection { if (isEmpty()) { return false; } - if (isVersionCheck()) { - return currentVersion.onOrAfter(lowerVersion) && currentVersion.onOrBefore(upperVersion); - } else { - return Features.areAllSupported(features) == false; - } + boolean skip = lowerVersion != null && upperVersion != null && currentVersion.onOrAfter(lowerVersion) + && currentVersion.onOrBefore(upperVersion); + skip |= Features.areAllSupported(features) == false; + return skip; } public boolean isVersionCheck() { @@ -91,7 +89,7 @@ public class SkipSection { public boolean isEmpty() { return EMPTY.equals(this); } - + private Version[] parseVersionRange(String versionRange) { if (versionRange == null) { return new Version[] { null, null }; @@ -111,4 +109,16 @@ public class SkipSection { upper.isEmpty() ? Version.CURRENT : Version.fromString(upper) }; } + + public String getSkipMessage(String description) { + StringBuilder messageBuilder = new StringBuilder(); + messageBuilder.append("[").append(description).append("] skipped,"); + if (reason != null) { + messageBuilder.append(" reason: [").append(getReason()).append("]"); + } + if (features.isEmpty() == false) { + messageBuilder.append(" unsupported features ").append(getFeatures()); + } + return messageBuilder.toString(); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index cef12bc930a..dfa30874221 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -58,9 +58,12 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Queue; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.atomic.AtomicBoolean; /** * A mock transport service that allows to simulate different network topology failures. @@ -95,7 +98,7 @@ public final class MockTransportService extends TransportService { /** * Build the service. - * + * * @param clusterSettings if non null the {@linkplain TransportService} will register with the {@link ClusterSettings} for settings * updates for {@link #TRACE_LOG_EXCLUDE_SETTING} and {@link #TRACE_LOG_INCLUDE_SETTING}.
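The net effect of the `SkipSection` rework, as exercised by the new tests later in this patch: a version range and a feature list may now be combined, and either one matching is enough to skip. A hedged usage sketch; the version literals and reasons are examples only.

```java
import java.util.Arrays;
import java.util.Collections;

import org.elasticsearch.Version;
import org.elasticsearch.test.rest.yaml.section.SkipSection;

class SkipSectionUsageSketch {
    static void demo() {
        // version-only skip: skips when the runner's version falls inside the range
        SkipSection byVersion = new SkipSection("2.0.0 - 2.1.0", Collections.emptyList(), "bug fixed in 2.1.1");
        boolean skipOn20 = byVersion.skip(Version.fromString("2.0.0"));   // true: inside range

        // combined skip: also skips on ANY version if the "boom" feature is unsupported
        SkipSection combined = new SkipSection("2.0.0 - 2.1.0", Arrays.asList("boom"), "needs boom");
        boolean skipAnywhere = combined.skip(Version.CURRENT);            // true: unknown feature
    }
}
```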
*/ @@ -142,7 +145,10 @@ public final class MockTransportService extends TransportService { * Clears the rule associated with the provided transport address. */ public void clearRule(TransportAddress transportAddress) { - transport().transports.remove(transportAddress); + Transport transport = transport().transports.remove(transportAddress); + if (transport instanceof ClearableTransport) { + ((ClearableTransport) transport).clearRule(); + } } /** @@ -292,7 +298,9 @@ public final class MockTransportService extends TransportService { public void addUnresponsiveRule(TransportAddress transportAddress, final TimeValue duration) { final long startTime = System.currentTimeMillis(); - addDelegate(transportAddress, new DelegateTransport(original) { + addDelegate(transportAddress, new ClearableTransport(original) { + private final Queue<Runnable> requestsToSendWhenCleared = new LinkedBlockingDeque<>(); + private boolean cleared = false; TimeValue getDelay() { return new TimeValue(duration.millis() - (System.currentTimeMillis() - startTime)); @@ -362,7 +370,9 @@ public final class MockTransportService extends TransportService { final TransportRequest clonedRequest = reg.newRequest(); clonedRequest.readFrom(bStream.bytes().streamInput()); - threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() { + Runnable runnable = new AbstractRunnable() { + AtomicBoolean requestSent = new AtomicBoolean(); + @Override public void onFailure(Exception e) { logger.debug("failed to send delayed request", e); @@ -370,9 +380,31 @@ public final class MockTransportService extends TransportService { @Override protected void doRun() throws IOException { - original.sendRequest(node, requestId, action, clonedRequest, options); + if (requestSent.compareAndSet(false, true)) { + original.sendRequest(node, requestId, action, clonedRequest, options); + } } - }); + }; + + // store the request to send it once the rule is cleared. + synchronized (this) { + if (cleared) { + runnable.run(); + } else { + requestsToSendWhenCleared.add(runnable); + threadPool.schedule(delay, ThreadPool.Names.GENERIC, runnable); + } + } + } + + + @Override + public void clearRule() { + synchronized (this) { + assert cleared == false; + cleared = true; + requestsToSendWhenCleared.forEach(Runnable::run); + } } }); } @@ -555,6 +587,23 @@ public final class MockTransportService extends TransportService { } } + /** + * The delegate transport instances defined in this class mock various kinds of disruption types. This subclass adds a method + * {@link #clearRule()} so that when the disruptions are cleared (see {@link #clearRule(TransportService)}) this gives the + * disruption a possibility to run clean-up actions.
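The unresponsive-rule bookkeeping above is subtle enough to restate in isolation: each delayed send becomes a run-once `Runnable` that is both scheduled and parked in a queue, and `clearRule()` flushes whatever has not fired yet. A standalone sketch under illustrative names, not the actual MockTransportService internals:

```java
import java.util.Queue;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

final class DelayedSendQueueSketch {
    private final Queue<Runnable> requestsToSendWhenCleared = new LinkedBlockingDeque<>();
    private boolean cleared = false;

    synchronized void submit(Runnable send, Consumer<Runnable> scheduler) {
        AtomicBoolean sent = new AtomicBoolean();
        Runnable once = () -> {
            if (sent.compareAndSet(false, true)) {    // run-once guard: scheduler and
                send.run();                           // clearRule() may race
            }
        };
        if (cleared) {
            once.run();                               // rule already cleared: deliver now
        } else {
            requestsToSendWhenCleared.add(once);      // remember for clearRule()
            scheduler.accept(once);                   // and keep the delayed delivery
        }
    }

    synchronized void clearRule() {
        cleared = true;
        requestsToSendWhenCleared.forEach(Runnable::run); // flush parked requests
    }
}
```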
+ */ + public abstract static class ClearableTransport extends DelegateTransport { + + public ClearableTransport(Transport transport) { + super(transport); + } + + /** + * Called by {@link #clearRule(TransportService)} + */ + public abstract void clearRule(); + } + List<Tracer> activeTracers = new CopyOnWriteArrayList<>(); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParserTests.java index f5d46cdd3d6..7473e393e5c 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParserTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/SkipSectionParserTests.java @@ -26,6 +26,8 @@ import org.elasticsearch.test.rest.yaml.parser.ClientYamlTestSuiteParseContext; import org.elasticsearch.test.rest.yaml.parser.SkipSectionParser; import org.elasticsearch.test.rest.yaml.section.SkipSection; +import java.util.Arrays; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -108,13 +110,11 @@ public class SkipSectionParserTests extends AbstractParserTestCase { ); SkipSectionParser skipSectionParser = new SkipSectionParser(); - - try { - skipSectionParser.parse(new ClientYamlTestSuiteParseContext("api", "suite", parser)); - fail("Expected RestTestParseException"); - } catch (ClientYamlTestParseException e) { - assertThat(e.getMessage(), is("version or features are mutually exclusive")); - } + SkipSection parse = skipSectionParser.parse(new ClientYamlTestSuiteParseContext("api", "suite", parser)); + assertEquals(VersionUtils.getFirstVersion(), parse.getLowerVersion()); + assertEquals(Version.fromString("0.90.2"), parse.getUpperVersion()); + assertEquals(Arrays.asList("regex"), parse.getFeatures()); + assertEquals("Delete ignores the parent param", parse.getReason()); } public void testParseSkipSectionNoReason() throws Exception { diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java new file mode 100644 index 00000000000..c8f7b351282 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.test.rest.yaml.section; + +import org.elasticsearch.Version; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.Collections; + +public class SkipSectionTests extends ESTestCase { + + public void testSkip() { + SkipSection section = new SkipSection("2.0.0 - 2.1.0", randomBoolean() ? Collections.emptyList() : + Arrays.asList("warnings"), "foobar"); + assertFalse(section.skip(Version.CURRENT)); + assertTrue(section.skip(Version.V_2_0_0)); + section = new SkipSection(randomBoolean() ? null : "2.0.0 - 2.1.0", Arrays.asList("boom"), "foobar"); + assertTrue(section.skip(Version.CURRENT)); + } + + public void testMessage() { + SkipSection section = new SkipSection("2.0.0 - 2.1.0", Arrays.asList("warnings"), "foobar"); + assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); + section = new SkipSection(null, Arrays.asList("warnings"), "foobar"); + assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); + section = new SkipSection(null, Arrays.asList("warnings"), null); + assertEquals("[FOOBAR] skipped, unsupported features [warnings]", section.getSkipMessage("FOOBAR")); + } +}
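For reference, a hedged illustration of the kind of YAML skip block the relaxed parser now accepts, with values matching those asserted in `SkipSectionParserTests` above; the exact YAML layout is an assumption, shown here as a Java string constant for consistency with the rest of the examples.

```java
public class SkipSectionYamlExample {
    // A skip block declaring BOTH a version range and a feature, which the old
    // parser rejected as "version or features are mutually exclusive". An open
    // lower bound (" - 0.90.2") maps to the first known version.
    public static final String SKIP_BLOCK =
        "skip:\n" +
        "    version:     \" - 0.90.2\"\n" +
        "    features:    regex\n" +
        "    reason:      Delete ignores the parent param\n";
}
```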